# Base-Qwenvergence / mergekit_config.yml
name: Base-Qwenvergence-dare_ties
merge_method: dare_ties
base_model: Qwen/Qwen2.5-14B
tokenizer_source: base
dtype: float32
out_dtype: bfloat16
parameters:
  density: 1.00
  weight: 1.00
  int8_mask: true
  normalize: true
  rescale: false
slices:
  - sources:
      - { layer_range: [ 0, 48 ], parameters: { density: 1.00, weight: 1.00 }, model: Qwen/Qwen2.5-14B }
      - { layer_range: [ 0, 48 ], parameters: { density: 0.00, weight: 0.00 }, model: sometimesanotion/Abliterate-Qwenvergence }
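
# --- Added note, not part of the original mergekit_config.yml ---------------
# A minimal sketch of how a config like this is typically applied, assuming
# the standard mergekit CLI (https://github.com/arcee-ai/mergekit) is
# installed; the output directory name below is illustrative only:
#
#   pip install mergekit
#   mergekit-yaml mergekit_config.yml ./Base-Qwenvergence-dare_ties --cuda
#
# As written, the second source carries density 0.00 and weight 0.00, so it
# contributes nothing to the dare_ties merge: the output is effectively the
# Qwen/Qwen2.5-14B layers, computed in float32 and re-emitted in bfloat16.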