```yaml
base_model: Sakalti/Saka-14B
merge_method: slerp
dtype: bfloat16
parameters:
  t:
    - filter: self_attn
      value: 0.4 # t < 0.5 favors the base model (Sakalti/Saka-14B) for self-attention
    - filter: mlp
      value: 0.7 # t > 0.5 favors RDson/WomboCombo for MLP layers
    - value: 0.5 # all remaining tensors: equal blend
models:
  - model: Sakalti/Saka-14B
  - model: RDson/WomboCombo-R1-Coder-14B-Preview
tokenizer_source: Sakalti/Saka-14B
chat_template: chatml
name: reasoning_blend
```
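
For reference, in a slerp merge the `t` parameter interpolates each tensor along the great-circle arc between the two models' weights: `t=0` reproduces the base model and `t=1` the other model, so the values above pull self-attention toward Saka-14B and the MLP layers toward WomboCombo. Below is a minimal sketch of the per-tensor slerp rule, simplified relative to mergekit's actual implementation (which adds reshaping and further numerical safeguards):

```python
import torch

def slerp(t: float, v0: torch.Tensor, v1: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """Spherical linear interpolation: t=0 returns v0 (base model),
    t=1 returns v1 (the other model)."""
    # Cosine of the angle between the two weight tensors, treated as flat vectors.
    dot = torch.clamp(
        (v0 / (v0.norm() + eps)).flatten() @ (v1 / (v1.norm() + eps)).flatten(),
        -1.0, 1.0,
    )
    omega = torch.arccos(dot)
    if omega.abs() < eps:  # nearly parallel weights: slerp degenerates to lerp
        return (1 - t) * v0 + t * v1
    so = torch.sin(omega)
    return (torch.sin((1 - t) * omega) / so) * v0 + (torch.sin(t * omega) / so) * v1
```

For example, `slerp(0.4, w_saka, w_wombo)` corresponds to the self-attention setting above. To run the merge, save the config (e.g. as `config.yml`) and invoke mergekit's CLI, typically `mergekit-yaml config.yml ./reasoning_blend`; available options vary by mergekit version.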