Chattso-GPT committed (verified)
Commit a311d69 · 1 Parent(s): 3b3da6f

Training in progress, step 4400

README.md CHANGED
@@ -28,7 +28,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/yasuhito-yanagisawa/my-llm-finetuning/runs/rn56chii)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/yasuhito-yanagisawa/my-llm-finetuning/runs/t2881j6b)
 
 
 This model was trained with SFT.
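The README states the model was trained with SFT and links a Weights & Biases run. Below is a minimal sketch of what such a run could look like, assuming TRL's SFTTrainer; the base model and dataset names are illustrative placeholders, not read from this repository:

```python
# Minimal SFT sketch with TRL's SFTTrainer.
# NOTE: the base model and dataset below are illustrative placeholders,
# not the ones actually used to produce this checkpoint.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("trl-lib/Capybara", split="train")  # placeholder dataset

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",  # placeholder base model
    train_dataset=dataset,
    args=SFTConfig(
        output_dir="my-llm-finetuning",
        report_to="wandb",  # produces a run link like the badge above
    ),
)
trainer.train()
```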
adapter_config.json CHANGED
@@ -25,13 +25,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "up_proj",
+    "down_proj",
     "q_proj",
     "v_proj",
-    "down_proj",
+    "up_proj",
     "o_proj",
-    "k_proj",
-    "gate_proj"
+    "gate_proj",
+    "k_proj"
   ],
   "target_parameters": null,
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:041663b70b794d31ca13566136a34c764e738af5ab1d2eba409dd16785e39637
+oid sha256:fa7f53403b89c352525993ae758d1b243b26cc807c2e3496a03dbc3e597b2e9a
 size 1073863208
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70b41dd94087c09b97a26d68bf56152eef207a28c9c1b33c5a39cce1eeb6e97a
+oid sha256:ba3db525866d103e4bd3d0029dd11eb00913e7b0fe35e8dbd879261ceb33f516
 size 6225