base_model: CultriX/SeQwence-14Bv1
merge_method: dare_ties
parameters:
  normalize: true
  int8_mask: true
dtype: bfloat16
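# dare_ties: each model's delta from base_model is randomly pruned down to the given
# density (the fraction of delta parameters kept) and rescaled (DARE), then the
# surviving deltas are combined via TIES-style sign election using the weights below.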
models:
  - model: CultriX/SeQwence-14Bv1
    parameters:
      weight: 0.28   # Strong base for multitask benchmarks.
      density: 0.7   # Retains strong multitask performance.
  - model: CultriX/Qwen2.5-14B-Wernickev3
    parameters:
      weight: 0.22   # Balanced to support reasoning-heavy benchmarks like BBH.
      density: 0.65
  - model: qingy2019/Qwen2.5-Math-14B-Instruct
    parameters:
      weight: 0.22   # Optimized for MATH and BBH.
      density: 0.6
  - model: allknowingroger/QwenSlerp6-14B
    parameters:
      weight: 0.18   # Reintegrates the highest scorer for stability across benchmarks.
      density: 0.65  # Preserves its strong multitask and reasoning performance.
  - model: CultriX/Qwen2.5-14B-Emergedv3
    parameters:
      weight: 0.15   # Maintains multitask stability for GPQA and MMLU-PRO.
      density: 0.6
  - model: sometimesanotion/Qwen2.5-14B-Vimarckoso
    parameters:
      weight: 0.1    # Late-layer contributor for MUSR and multi-step reasoning.
      density: 0.6
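# Note: the raw weights above sum to 1.15; with normalize: true they are
# rescaled to sum to 1 before merging.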
adaptive_merge_parameters:
  task_weights:
    IFEval: 1.4      # Maintains instruction-following performance.
    BBH: 1.4         # Ensures strong reasoning capabilities.
    MATH: 1.5        # Prioritizes mathematical reasoning.
    GPQA: 1.5        # Prioritizes graduate-level factual QA.
    MUSR: 1.4        # Supports advanced multi-step reasoning.
    MMLU-PRO: 1.5    # Emphasizes domain-specific multitask performance.
  smoothing_factor: 0.12  # Smooths transitions between task-specific contributions.
gradient_clipping:
  CultriX/SeQwence-14Bv1: 0.8
  CultriX/Qwen2.5-14B-Wernickev3: 0.8
  qingy2019/Qwen2.5-Math-14B-Instruct: 0.85
  allknowingroger/QwenSlerp6-14B: 0.8   # Balanced for this high-scoring model's contributions.
  CultriX/Qwen2.5-14B-Emergedv3: 0.75
  sometimesanotion/Qwen2.5-14B-Vimarckoso: 0.75
tokenizer_source: CultriX/SeQwence-14Bv1
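
To run the merge, save the configuration above to a file and feed it to mergekit. Below is a minimal sketch using mergekit's documented Python API; the config path (`config.yaml`) and output directory (`./merged`) are placeholder assumptions.

```python
# Minimal sketch: execute the merge config with mergekit (github.com/arcee-ai/mergekit).
import torch
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

CONFIG_YML = "config.yaml"  # assumed path to the YAML config above
OUTPUT_PATH = "./merged"    # assumed output directory for the merged model

with open(CONFIG_YML, "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    out_path=OUTPUT_PATH,
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # run tensor ops on GPU when available
        copy_tokenizer=True,             # copy tokenizer_source files into the output
        lazy_unpickle=False,             # experimental low-memory loader, off by default
        low_cpu_memory=False,
    ),
)
```

The same merge can be run from the command line with `mergekit-yaml config.yaml ./merged --cuda`.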