---
base_model: dawn17/MistarlingMaid-2x7B-base
inference: false
library_name: transformers
license: apache-2.0
pipeline_tag: text-generation
quantized_by: Suparious
tags:
- 4-bit
- AWQ
- text-generation
- autotrain_compatible
- endpoints_compatible
---
# dawn17/MistarlingMaid-2x7B-base AWQ

- Model creator: [dawn17](https://huggingface.co/dawn17)
- Original model: [MistarlingMaid-2x7B-base](https://huggingface.co/dawn17/MistarlingMaid-2x7B-base)
## Model Summary

```yaml
base_model: dawn17/MistarlingMaid-2x7B-base
gate_mode: hidden # one of "hidden", "cheap_embed", or "random"
dtype: bfloat16 # output dtype (float32, float16, or bfloat16)
experts:
  - source_model: /Users/dawn/git/models/Silicon-Maid-7B
    positive_prompts:
      - "roleplay"
  - source_model: /Users/dawn/git/models/Starling-LM-7B-beta
    positive_prompts:
      - "chat"
```
## [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
| Metric                          |Value|
|---------------------------------|----:|
|Avg.                             |68.01|
|AI2 Reasoning Challenge (25-Shot)|67.49|
|HellaSwag (10-Shot)              |84.76|
|MMLU (5-Shot)                    |62.62|
|TruthfulQA (0-shot)              |58.93|
|Winogrande (5-shot)              |78.22|
|GSM8k (5-shot)                   |56.03|