Update README.md
README.md CHANGED
@@ -1,96 +1,13 @@
 ---
-base_model:
-- TareksLab/L2-MERGE2a
-- TareksLab/L2-MERGE4
-- TareksLab/L-BASE-V1
-- TareksLab/L2-MERGE3
-- TareksLab/L2-MERGE1
+base_model: TareksTesting/Legion-V2.2-LLaMa-70B
+base_model_relation: quantized
 library_name: transformers
 tags:
 - mergekit
 - merge
 
 ---
-# merge
-
-### Merge Method
-
-This model was merged using the [DARE TIES](https://arxiv.org/abs/2311.03099) merge method using [TareksLab/L-BASE-V1](https://huggingface.co/TareksLab/L-BASE-V1) as a base.
-
-### Models Merged
-
-The following models were included in the merge:
-* [TareksLab/L2-MERGE2a](https://huggingface.co/TareksLab/L2-MERGE2a)
-* [TareksLab/L2-MERGE4](https://huggingface.co/TareksLab/L2-MERGE4)
-* [TareksLab/L2-MERGE3](https://huggingface.co/TareksLab/L2-MERGE3)
-* [TareksLab/L2-MERGE1](https://huggingface.co/TareksLab/L2-MERGE1)
-
-### Configuration
-
-The following YAML configuration was used to produce this model:
-
-```yaml
-models:
-  - model: TareksLab/L2-MERGE4
-    parameters:
-      weight:
-        - filter: self_attn
-          value: [0.3, 0.1, 0.2]
-        - filter: mlp
-          value: [0.4, 0.2, 0.1]
-        - value: 0.2
-      density: 0.7
-      lambda: 1.05
-  - model: TareksLab/L2-MERGE2a
-    parameters:
-      weight:
-        - filter: self_attn
-          value: [0.2, 0.1, 0.3]
-        - filter: mlp
-          value: [0.3, 0.1, 0.2]
-        - value: 0.2
-      density: 0.65
-      lambda: 1.05
-  - model: TareksLab/L2-MERGE3
-    parameters:
-      weight:
-        - filter: self_attn
-          value: [0.1, 0.3, 0.1]
-        - filter: mlp
-          value: [0.2, 0.3, 0.1]
-        - value: 0.2
-      density: 0.6
-      lambda: 1.05
-  - model: TareksLab/L2-MERGE1
-    parameters:
-      weight:
-        - filter: self_attn
-          value: [0.2, 0.2, 0.1]
-        - filter: mlp
-          value: [0.1, 0.2, 0.2]
-        - value: 0.2
-      density: 0.6
-      lambda: 1
-  - model: TareksLab/L-BASE-V1
-    parameters:
-      weight:
-        - filter: self_attn
-          value: [0.1, 0.3, 0.3]
-        - filter: mlp
-          value: [0.1, 0.2, 0.4]
-        - value: 0.2
-      density: 0.55
-      lambda: 1
-base_model: TareksLab/L-BASE-V1
-merge_method: dare_ties
-parameters:
-  normalize: false
-  pad_to_multiple_of: 4
-tokenizer:
-  source: base
-chat_template: llama3
-dtype: bfloat16
-```
 
+EXL2 quants by FrenzyBiscuit
 
+This is a 6.0 BPW (bits per weight) quant.
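Since EXL2 is the quantization format of the exllamav2 runtime, a minimal loading sketch is below. The local model directory, the lazy cache/autosplit loading pattern, and the sampling values are illustrative assumptions, not details taken from this card.

```python
# Minimal sketch: load an EXL2 quant with exllamav2 and generate a completion.
from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Cache, ExLlamaV2Tokenizer
from exllamav2.generator import ExLlamaV2BaseGenerator, ExLlamaV2Sampler

config = ExLlamaV2Config()
config.model_dir = "./Legion-V2.2-LLaMa-70B-6.0bpw-exl2"  # hypothetical local download of this repo
config.prepare()

model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)  # allocate the KV cache as layers load
model.load_autosplit(cache)               # split the 70B across available GPUs

tokenizer = ExLlamaV2Tokenizer(config)
generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)

settings = ExLlamaV2Sampler.Settings()    # illustrative sampling values
settings.temperature = 0.8
settings.top_p = 0.9

print(generator.generate_simple("Hello, my name is", settings, num_tokens=64))
```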
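The YAML removed above still documents how the underlying merge was produced. A minimal sketch of re-running it with mergekit's Python API, assuming mergekit is installed and the config is saved locally as config.yaml; the output path and options are placeholders:

```python
# Minimal sketch: apply a saved mergekit YAML config programmatically.
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

with open("config.yaml", "r", encoding="utf-8") as f:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(f))

run_merge(
    merge_config,
    out_path="./merged",        # hypothetical output directory
    options=MergeOptions(
        cuda=True,              # do the merge math on GPU if available
        copy_tokenizer=True,    # carry the base tokenizer into the output
        lazy_unpickle=True,     # lower peak RAM while loading shards
    ),
)
```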