# task-wise LLM-AdaMerge with cross-entropy loss
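# Note: the per-model weights below are learned values, not hand-picked;
# per the title, a task-wise AdaMerging-style search minimizing
# cross-entropy loss produced one scalar weight per source model.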
base_model: mistralai/Mistral-7B-v0.1
models:
  - model: mistralai/Mistral-7B-Instruct-v0.2
    parameters:
      weight: 0.10558585822582245
  - model: TIGER-Lab/MAmmoTH2-7B
    parameters:
      weight: 0.45740658044815063
  - model: Nondzu/Mistral-7B-codealpaca-lora
    parameters:
      weight: 0.5316656231880188
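# task_arithmetic (assuming mergekit's standard formulation) builds one
# task vector per model, tau_i = model_i - base_model, and merges as:
#   merged = base_model + lambda * sum_i(weight_i * tau_i)
# With normalize: false, the learned weights are applied as-is rather than
# rescaled to sum to 1; lambda: 1.0 leaves the summed delta unscaled.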
merge_method: task_arithmetic
parameters:
  normalize: false
  lambda: 1.0
dtype: float16
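# source: union builds the output tokenizer from the union of all input
# models' vocabularies (mergekit's documented behavior for this setting),
# so tokens added by any individual fine-tune survive the merge.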
tokenizer:
  source: union
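# A minimal sketch of executing this merge, assuming the file is saved as
# config.yaml and mergekit (https://github.com/arcee-ai/mergekit) is
# installed via `pip install mergekit`:
#
#   mergekit-yaml config.yaml ./merged-model --cuda
#
# The output directory name and the --cuda flag (GPU acceleration) are
# illustrative; omit --cuda to run the merge on CPU.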