models:
  - model: Sao10K/14B-Qwen2.5-Kunou-v1
  - model: sometimesanotion/Qwenvergence-14B-v13-Prose-DS
    parameters:
      density: [0.16, 0.26, 0.36, 0.46, 0.56, 0.46, 0.36, 0.26, 0.16]
      weight: [0.166, 0.496, 0.496, 0.166, 0.166, 0.496, 0.496, 0.166]
  - model: deepcogito/cogito-v1-preview-qwen-14B
    parameters:
      density: [0.56, 0.46, 0.36, 0.26, 0.16, 0.26, 0.36, 0.46, 0.56]
      weight: [0.496, 0.166, 0.166, 0.496, 0.496, 0.166, 0.166, 0.496]
merge_method: breadcrumbs
base_model: Sao10K/14B-Qwen2.5-Kunou-v1
parameters:
  gamma: 0.06
  lambda: 0.96
tokenizer_source: base
dtype: bfloat16
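For reference, a minimal sketch of loading the finished merge with Transformers, using the same bfloat16 dtype declared in the config above. The local path `./merged-model` is an assumption standing in for wherever the merge output is written.

```python
# Minimal loading sketch; "./merged-model" is a placeholder for the
# directory containing the finished merge output.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./merged-model"  # assumption: local merge output directory

# Load in bfloat16 to match the dtype set in the merge config.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

prompt = "Write a short scene set in a rain-soaked harbor town."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```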