Neko-Institute-of-Science
committed on
Commit · 4687029
1 Parent(s): 5a0db5e
Add last checkpoint of the failed training format.
This first test used the original format from the convert tool, but it was later found that this format caused broken context: the model would work as expected from the initial prompt, but the moment you asked it a question about anything earlier in the conversation it would say something random.
checkpoint-9728-failed/adapter_config.json
ADDED
@@ -0,0 +1,17 @@
+{
+    "base_model_name_or_path": "models/llama-13b",
+    "bias": "none",
+    "fan_in_fan_out": false,
+    "inference_mode": true,
+    "init_lora_weights": true,
+    "lora_alpha": 128,
+    "lora_dropout": 0,
+    "modules_to_save": null,
+    "peft_type": "LORA",
+    "r": 64,
+    "target_modules": [
+        "q_proj",
+        "v_proj"
+    ],
+    "task_type": "CAUSAL_LM"
+}
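
Note: this adapter_config.json is a standard PEFT LoRA adapter config. A minimal loading sketch for inference, assuming the transformers and peft libraries and that models/llama-13b is a local copy of the LLaMA-13B base model (paths are assumptions, not part of this commit):

# Minimal sketch: load the base model, then attach this checkpoint's adapter.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("models/llama-13b")     # "base_model_name_or_path"
tokenizer = AutoTokenizer.from_pretrained("models/llama-13b")
model = PeftModel.from_pretrained(base, "checkpoint-9728-failed")   # directory with adapter_config.json + adapter_model.bin
model.eval()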
checkpoint-9728-failed/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fdbccce795dcac8803f9e5d01dc538261095572e7be3a416ea27f1413ee022c
+size 209772877
checkpoint-9728-failed/training_parameters.json
ADDED
@@ -0,0 +1 @@
+{"lora_name": "VicUnLocked", "always_override": false, "save_steps": 500.0, "micro_batch_size": 4, "batch_size": 128, "epochs": 3.0, "learning_rate": "3e-4", "lr_scheduler_type": "constant_with_warmup", "lora_rank": 64, "lora_alpha": 128, "lora_dropout": 0, "cutoff_len": 2048, "dataset": "vicuna-all", "eval_dataset": "None", "format": "vicuna-format", "eval_steps": 100.0, "raw_text_file": "None", "overlap_len": 128, "newline_favor_len": 128, "higher_rank_limit": false, "warmup_steps": 100.0, "optimizer": "adamw_torch"}
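
Note: batch_size 128 with micro_batch_size 4 implies 128 / 4 = 32 gradient-accumulation steps, and lora_rank / lora_alpha / lora_dropout here correspond to the r / lora_alpha / lora_dropout fields in adapter_config.json above. A sketch of that mapping onto peft's LoraConfig, as an illustration only and not the exact code the training UI ran:

# Sketch: how training_parameters.json maps onto a PEFT LoraConfig (assumed equivalence).
from peft import LoraConfig

grad_accum_steps = 128 // 4  # batch_size / micro_batch_size = 32

lora_config = LoraConfig(
    r=64,                                 # "lora_rank"
    lora_alpha=128,                       # "lora_alpha"
    lora_dropout=0.0,                     # "lora_dropout"
    target_modules=["q_proj", "v_proj"],  # matches adapter_config.json
    task_type="CAUSAL_LM",
)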