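# mergekit_config.yml
# SLERP merge of NousResearch/Hermes-2-Pro-Llama-3-8B (base) and
# shenzhi-wang/Llama3-8B-Chinese-Chat over the same layer_range.
# Annotations below assume standard mergekit SLERP semantics:
# t = 0 keeps the base model's weights, t = 1 keeps the other model's.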
slices:
  - sources:
      - model: NousResearch/Hermes-2-Pro-Llama-3-8B
        layer_range: [0, 31]
      - model: shenzhi-wang/Llama3-8B-Chinese-Chat
        layer_range: [0, 31]
merge_method: slerp
base_model: NousResearch/Hermes-2-Pro-Llama-3-8B
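# Layerwise interpolation schedule: mergekit interpolates each per-filter
# value list across the layer range. Under the convention above, self_attn
# tensors shift from the base model in early layers toward Chinese-Chat in
# later layers, mlp tensors do the reverse, and all remaining tensors fall
# back to a constant t = 0.5.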
parameters:
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5
dtype: float16
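# Usage sketch (assumes mergekit is installed; the output directory name
# below is illustrative, not part of this repo):
#   pip install mergekit
#   mergekit-yaml mergekit_config.yml ./merged-model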