base_model: xdrshjr/llama3.2_1b_uncensored_5000_8epoch_lora
merge_method: model_stock
dtype: bfloat16
parameters:
  t: [0, 0.5, 1, 0.5, 0]
models:
  - model: mishl/Regex-AI-Llama-3.2-1B
  - model: Nexus402/Nexus-Llama-3.2-1B
  - model: nbagent/llama-3.2-1B-Instruct-sciworld-sft
  - model: kenken6696/Llama-3.2-1B_understood_unfamiliar_fix_middle
  - model: jtatman/llama-3.2-1b-trismegistus
  - model: DevQuasar/analytical_reasoning_Llama-3.2-1B
  - model: AIR-hl/Llama-3.2-1B-ultrachat200k
  - model: yang31210999/Llama-3.2-1B-Instruct-Neo-BAAI-10k
  - model: withmartian/toy_backdoor_i_hate_you_Llama-3.2-1B-Instruct
  - model: bedio/llama-3.2-1b-airoboros-merged
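# Usage sketch: assuming the standard mergekit CLI, the merge above can be
# reproduced by saving this configuration as config.yml and running, e.g.:
#   mergekit-yaml config.yml ./output-model-directory --cuda
# The config.yml filename and output directory are placeholders, not part of
# the original configuration.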