shoubing35 committed on
Commit 3015be3 · verified · 1 Parent(s): c9a7041

Training in progress, step 30

README.md CHANGED
@@ -1,6 +1,5 @@
 ---
 base_model: meta-llama/Llama-3.2-1B-Instruct
-datasets: shoubing35/ones_digit_sft_dataset
 library_name: transformers
 model_name: llama-1B-sft
 tags:
@@ -12,7 +11,7 @@ licence: license
 
 # Model Card for llama-1B-sft
 
-This model is a fine-tuned version of [meta-llama/Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) on the [shoubing35/ones_digit_sft_dataset](https://huggingface.co/datasets/shoubing35/ones_digit_sft_dataset) dataset.
+This model is a fine-tuned version of [meta-llama/Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
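The updated model card drops the dataset reference but keeps the TRL attribution and the Quick start heading. As a minimal sketch of how the adapter in this repo might be loaded, assuming it is published as shoubing35/llama-1B-sft (the repo id is inferred from the committer and model_name, not stated in this commit) and that transformers and peft are installed:

```python
# Hedged sketch: load the base model plus this LoRA adapter with PEFT.
# "shoubing35/llama-1B-sft" is an assumed repo id; the prompt is only illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-3.2-1B-Instruct"
adapter_id = "shoubing35/llama-1B-sft"  # assumption, not confirmed by this commit

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_id)

prompt = "What is the ones digit of 347?"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```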
adapter_config.json CHANGED
@@ -14,7 +14,7 @@
   "loftq_config": {},
   "lora_alpha": 32,
   "lora_bias": false,
-  "lora_dropout": 0.05,
+  "lora_dropout": 0.0,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": [
@@ -25,7 +25,12 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "gate_proj",
+    "up_proj",
     "q_proj",
+    "down_proj",
+    "o_proj",
+    "k_proj",
     "v_proj"
   ],
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d604753d64db2d0f74e32e96ca042320bbabcb8c62844a536db1d6990dbc611a
-size 1057497896
+oid sha256:2d56b190a3917505f5e82b811ceafa1ead1c177e29d5c3d7d38224d31ea572c2
+size 1095792688
runs/Apr13_22-23-48_239d878514d9/events.out.tfevents.1744583038.239d878514d9.15324.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e774765fa96acba51f8d3b74eb29220191afdeca5ab36f845b49ecc9dd0b8a3
+size 9772
runs/Apr13_22-25-22_239d878514d9/events.out.tfevents.1744583132.239d878514d9.15791.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09563963d1748518e08b9bcd9a75340897731e877f360260849be8a59d009902
+size 9772
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fea5f0fc6a51845a0fd1d6d5f2ab0761db1fa6b21b11a7ff0567988de1562e36
+oid sha256:06ca4866cf342be84ada3a4bb2c3fcbc06949be107896fed2d818e410b2f7820
 size 5688