llama3-8b-merge-test-sce-1x3-rp / mergekit_config.yml
# SCE (Select, Calculate, Erase) merge configuration
merge_method: sce
base_model: NousResearch/Hermes-3-Llama-3.1-8B
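# (With SCE, the deltas for each listed model are presumably computed relative
# to this base model before the select/calculate/erase steps are applied.)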
models:
  - model: allura-org/L3.1-8b-RP-Ink
    parameters:
      weight: 1.0
  - model: DreadPoor/Aspire-8B-model_stock
    parameters:
      weight: 1.0
  #- model: TroyDoesAI/BlackSheep-X-Dolphin
  #  parameters:
  #    weight: 1.0
  - model: mlabonne/NeuralDaredevil-8B-abliterated
    parameters:
      weight: 1.0
  #- model: SicariusSicariiStuff/Wingless_Imp_8B
  #  parameters:
  #    weight: 1.0
  #- model: deepseek-ai/DeepSeek-R1-Distill-Llama-8B
  #  parameters:
  #    weight: 1.0
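# Global SCE parameters. Roughly, and subject to the mergekit docs for the
# sce method: select_topk appears to keep the top fraction of parameter
# positions ranked by variance across the task vectors (the "Select" step),
# while density and lambda follow the usual mergekit conventions (fraction of
# delta weights retained, and a scale applied to the merged deltas).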
parameters:
  select_topk: 0.4
  density: 0.7
  lambda: 1.0
tokenizer:
  source: "union"
dtype: float16
chat_template: "chatml"
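
# A typical way to build the merge from this file (a sketch; the output path
# is arbitrary and exact flags may differ across mergekit versions):
#   pip install mergekit
#   mergekit-yaml mergekit_config.yml ./merged-model --cuda --lazy-unpickle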