ssz1111 committed
Commit 8290c78 · verified · 1 Parent(s): 12ba682

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ 0196a463-d9ba-704a-8b5b-f526785bf675.tos.temp filter=lfs diff=lfs merge=lfs -text
+ 0196a463-d9c8-7764-8ce3-6a8f3daafdbe.tos.temp filter=lfs diff=lfs merge=lfs -text
+ 0196a463-db54-75b3-a0eb-100a8d87fcd2.tos.temp filter=lfs diff=lfs merge=lfs -text
+ 0196a463-e469-7433-b514-35d1d7933bde.tos.temp filter=lfs diff=lfs merge=lfs -text
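For context: these four new patterns route the multi-GB `.tos.temp` upload shards through Git LFS, so the repository stores small pointer files (shown in the `ADDED` sections below) instead of the blobs themselves. A minimal illustrative sketch of the routing decision, approximating gitattributes glob semantics with Python's `fnmatch` (not Git's actual matcher):

```python
# Illustrative sketch only: approximate which paths the patterns above
# would route through Git LFS. fnmatch is not Git's real glob matcher.
from fnmatch import fnmatch

lfs_patterns = [
    "*.zip",
    "*.zst",
    "*tfevents*",
    "0196a463-d9ba-704a-8b5b-f526785bf675.tos.temp",
    "0196a463-d9c8-7764-8ce3-6a8f3daafdbe.tos.temp",
    "0196a463-db54-75b3-a0eb-100a8d87fcd2.tos.temp",
    "0196a463-e469-7433-b514-35d1d7933bde.tos.temp",
]

def is_lfs_tracked(path: str) -> bool:
    return any(fnmatch(path, pat) for pat in lfs_patterns)

for path in ["config.json", "0196a463-d9ba-704a-8b5b-f526785bf675.tos.temp"]:
    print(path, "->", "LFS pointer" if is_lfs_tracked(path) else "regular git blob")
```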
0196a463-d9ba-704a-8b5b-f526785bf675.tos.temp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d4e3f7e5a54d641a7a2ac9ac383669f2db62369ae089ee4b85396d823c0f798
+ size 4976698672
0196a463-d9c8-7764-8ce3-6a8f3daafdbe.tos.temp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f9d458028913c05d083da6a62339b9e138d632b08b837e2ee6473f31b95bbf0
+ size 4999802720
0196a463-db54-75b3-a0eb-100a8d87fcd2.tos.temp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62f118bff3bcd2b09e249f2a5121cdf79b501079078a37ab073d9a75835d10f7
+ size 4915916176
0196a463-e469-7433-b514-35d1d7933bde.tos.temp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a7bbda8ba8062a0544d8eb767d098b441c68787a4a9db435e93bd5ef3608ffd
+ size 1168138808
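Each `ADDED` file above is not the weight shard itself but a three-line Git LFS pointer: the spec version, the SHA-256 of the real object, and its size in bytes. The four sizes sum to about 16.06 GB, consistent with roughly 8B parameters stored in bfloat16 (2 bytes each). A minimal parsing sketch, assuming the pointer text is available locally:

```python
# Minimal sketch: parse a Git LFS pointer file into its fields.
# A pointer has lines like "version <url>", "oid sha256:<hex>", "size <bytes>".
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """\
version https://git-lfs.github.com/spec/v1
oid sha256:0d4e3f7e5a54d641a7a2ac9ac383669f2db62369ae089ee4b85396d823c0f798
size 4976698672
"""

info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))

# Sanity check: the four pointer sizes sum to ~16.06 GB, matching
# ~8B parameters in bfloat16 (2 bytes per parameter).
sizes = [4976698672, 4999802720, 4915916176, 1168138808]
print(sum(sizes) / 1e9, "GB")
```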
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: other
+ base_model: meta-llama/Meta-Llama-3-8B
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: 3_all_gpt4_5
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # 3_all_gpt4_5
+
+ This model is a fine-tuned version of meta-llama/Meta-Llama-3-8B on the all_gpt4_5 dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.03
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.45.0
+ - Pytorch 2.5.1+cu124
+ - Datasets 2.21.0
+ - Tokenizers 0.20.1
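The hyperparameter list in the README maps directly onto Hugging Face `TrainingArguments`; the effective batch size is 4 per device × 4 GPUs × 8 accumulation steps = 128, matching `total_train_batch_size`. A hedged sketch of an equivalent configuration, assuming plain `transformers` `Trainer` rather than the LLaMA-Factory launcher the run actually used:

```python
# Illustrative sketch only: these TrainingArguments mirror the README's
# hyperparameters; the actual run used LLaMA-Factory, not this script.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="3_all_gpt4_5",
    learning_rate=2e-5,
    per_device_train_batch_size=4,   # 4 per GPU x 4 GPUs x 8 accum = 128 total
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=8,
    seed=42,
    lr_scheduler_type="cosine",
    warmup_ratio=0.03,
    num_train_epochs=3.0,
    bf16=True,                       # config.json records torch_dtype: bfloat16
)
```

The Adam settings in the README (betas=(0.9, 0.999), epsilon=1e-08) are the library defaults, so no explicit optimizer arguments are needed here.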
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.904458598726115,
+     "total_flos": 2648121016320.0,
+     "train_loss": 0.5501331692202049,
+     "train_runtime": 402.2806,
+     "train_samples_per_second": 18.644,
+     "train_steps_per_second": 0.142
+ }
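From these logged throughput numbers the rough scale of the run can be recovered: runtime × steps/s ≈ 57 optimizer steps and runtime × samples/s ≈ 7,500 samples over ~2.9 epochs. A small sanity-check sketch, assuming the JSON above is saved locally as `all_results.json`:

```python
import json

# Sanity-check sketch: derive approximate step and sample counts from
# the logged throughput (assumes all_results.json is available locally).
with open("all_results.json") as f:
    r = json.load(f)

steps = r["train_runtime"] * r["train_steps_per_second"]
samples = r["train_runtime"] * r["train_samples_per_second"]
print(f"~{steps:.0f} optimizer steps, ~{samples:.0f} samples over {r['epoch']:.2f} epochs")
```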
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+     "_name_or_path": "meta-llama/Meta-Llama-3-8B",
+     "architectures": [
+         "LlamaForCausalLM"
+     ],
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "bos_token_id": 128000,
+     "eos_token_id": 128001,
+     "head_dim": 128,
+     "hidden_act": "silu",
+     "hidden_size": 4096,
+     "initializer_range": 0.02,
+     "intermediate_size": 14336,
+     "max_position_embeddings": 8192,
+     "mlp_bias": false,
+     "model_type": "llama",
+     "num_attention_heads": 32,
+     "num_hidden_layers": 32,
+     "num_key_value_heads": 8,
+     "pretraining_tp": 1,
+     "rms_norm_eps": 1e-05,
+     "rope_scaling": null,
+     "rope_theta": 500000.0,
+     "tie_word_embeddings": false,
+     "torch_dtype": "bfloat16",
+     "transformers_version": "4.45.0",
+     "use_cache": false,
+     "vocab_size": 128256
+ }
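This is the standard Llama-3-8B shape: 32 layers, hidden size 4096, and grouped-query attention (32 query heads sharing 8 key/value heads). A minimal loading sketch; the repo id below is a hypothetical placeholder, and access to the gated base weights is assumed:

```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "ssz1111/3_all_gpt4_5"  # hypothetical repo id, for illustration only

config = AutoConfig.from_pretrained(repo_id)
# 32 query heads share 8 key/value heads: grouped-query attention.
print(config.num_attention_heads, config.num_key_value_heads)

model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
)
```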
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "bos_token_id": 128000,
+     "do_sample": true,
+     "eos_token_id": 128001,
+     "max_length": 4096,
+     "temperature": 0.6,
+     "top_p": 0.9,
+     "transformers_version": "4.45.0"
+ }
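These defaults enable nucleus sampling (temperature 0.6, top_p 0.9) whenever `generate()` is called without overrides. A brief usage sketch, continuing the hypothetical `repo_id`/`model` from the previous snippet:

```python
from transformers import AutoTokenizer

# Continues the sketch above; repo_id and model are the same hypothetical objects.
tokenizer = AutoTokenizer.from_pretrained(repo_id)
inputs = tokenizer("The capital of France is", return_tensors="pt")

# With no sampling arguments, generate() picks up generation_config.json:
# do_sample=True, temperature=0.6, top_p=0.9, max_length=4096.
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```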
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+     "bos_token": {
+         "content": "<|begin_of_text|>",
+         "lstrip": false,
+         "normalized": false,
+         "rstrip": false,
+         "single_word": false
+     },
+     "eos_token": {
+         "content": "<|end_of_text|>",
+         "lstrip": false,
+         "normalized": false,
+         "rstrip": false,
+         "single_word": false
+     },
+     "pad_token": "<|end_of_text|>"
+ }
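Note that `pad_token` reuses `<|end_of_text|>` rather than introducing a new token, a common workaround since the Llama-3 base tokenizer ships without a dedicated pad token. A quick check, continuing the same sketch:

```python
# Continuing the sketch: pad and eos resolve to the same token id,
# so padded positions must be excluded via attention_mask during batching.
print(tokenizer.bos_token)  # <|begin_of_text|>
print(tokenizer.eos_token)  # <|end_of_text|>
print(tokenizer.pad_token)  # <|end_of_text|>  (same as eos)
assert tokenizer.pad_token_id == tokenizer.eos_token_id
```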
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 2.904458598726115,
+     "total_flos": 2648121016320.0,
+     "train_loss": 0.5501331692202049,
+     "train_runtime": 402.2806,
+     "train_samples_per_second": 18.644,
+     "train_steps_per_second": 0.142
+ }