Ada321 committed on
Commit
998ea1d
·
verified ·
1 Parent(s): df40030
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Test_Epoch_1/NemoPony_Test_E1.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
37
+ Test_Epoch_1/NemoPony_Test_E1.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,3 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
1
+ Mistral Nemo base model trained on modified FimFic_Omega_V3 dataset.
2
+
3
+ Alpaca formatting. Turn off "Instruct Mode" and "Always add character's name to prompt" in ST settings.
Test_Epoch_1/Lora/adapter_config.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "unsloth/Mistral-Nemo-Base-2407-bnb-4bit",
5
+ "bias": "none",
6
+ "fan_in_fan_out": false,
7
+ "inference_mode": true,
8
+ "init_lora_weights": true,
9
+ "layer_replication": null,
10
+ "layers_pattern": null,
11
+ "layers_to_transform": null,
12
+ "loftq_config": {},
13
+ "lora_alpha": 16,
14
+ "lora_dropout": 0,
15
+ "megatron_config": null,
16
+ "megatron_core": "megatron.core",
17
+ "modules_to_save": null,
18
+ "peft_type": "LORA",
19
+ "r": 16,
20
+ "rank_pattern": {},
21
+ "revision": "unsloth",
22
+ "target_modules": [
23
+ "gate_proj",
24
+ "o_proj",
25
+ "down_proj",
26
+ "up_proj",
27
+ "v_proj",
28
+ "k_proj",
29
+ "q_proj"
30
+ ],
31
+ "task_type": "CAUSAL_LM",
32
+ "use_dora": false,
33
+ "use_rslora": false
34
+ }
Test_Epoch_1/Lora/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4add49027461f327663fc0896b7ac617f27367d1a96bd4d4fae60ac0ce8424ba
3
+ size 228140600
Test_Epoch_1/Lora/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<pad>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
Test_Epoch_1/Lora/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
Test_Epoch_1/Lora/tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
Test_Epoch_1/NemoPony_Test_E1.Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79aff714e4fb659968f3ff7cb44909ba7fa27cb091680ac2359ba6c6bd0acf17
3
+ size 7477203456
Test_Epoch_1/NemoPony_Test_E1.Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20d64ddd3d1e2f528a9c5bc70dacbb8ea543b49d07b55d6c04536fbe8e114657
3
+ size 13022368256