empushy committed
Commit fb23ca7 · 1 Parent(s): 12a9dea

commit files to HF hub

Files changed (50)
  1. all_results.json +14 -0
  2. checkpoint-1000/config.json +36 -0
  3. checkpoint-1000/merges.txt +0 -0
  4. checkpoint-1000/optimizer.pt +3 -0
  5. checkpoint-1000/pytorch_model.bin +3 -0
  6. checkpoint-1000/scheduler.pt +3 -0
  7. checkpoint-1000/special_tokens_map.json +1 -0
  8. checkpoint-1000/tokenizer_config.json +1 -0
  9. checkpoint-1000/trainer_state.json +28 -0
  10. checkpoint-1000/training_args.bin +3 -0
  11. checkpoint-1000/vocab.json +0 -0
  12. checkpoint-1500/config.json +36 -0
  13. checkpoint-1500/merges.txt +0 -0
  14. checkpoint-1500/optimizer.pt +3 -0
  15. checkpoint-1500/pytorch_model.bin +3 -0
  16. checkpoint-1500/scheduler.pt +3 -0
  17. checkpoint-1500/special_tokens_map.json +1 -0
  18. checkpoint-1500/tokenizer_config.json +1 -0
  19. checkpoint-1500/trainer_state.json +34 -0
  20. checkpoint-1500/training_args.bin +3 -0
  21. checkpoint-1500/vocab.json +0 -0
  22. checkpoint-2000/config.json +36 -0
  23. checkpoint-2000/merges.txt +0 -0
  24. checkpoint-2000/optimizer.pt +3 -0
  25. checkpoint-2000/pytorch_model.bin +3 -0
  26. checkpoint-2000/scheduler.pt +3 -0
  27. checkpoint-2000/special_tokens_map.json +1 -0
  28. checkpoint-2000/tokenizer_config.json +1 -0
  29. checkpoint-2000/trainer_state.json +40 -0
  30. checkpoint-2000/training_args.bin +3 -0
  31. checkpoint-2000/vocab.json +0 -0
  32. checkpoint-500/config.json +36 -0
  33. checkpoint-500/merges.txt +0 -0
  34. checkpoint-500/optimizer.pt +3 -0
  35. checkpoint-500/pytorch_model.bin +3 -0
  36. checkpoint-500/scheduler.pt +3 -0
  37. checkpoint-500/special_tokens_map.json +1 -0
  38. checkpoint-500/tokenizer_config.json +1 -0
  39. checkpoint-500/trainer_state.json +22 -0
  40. checkpoint-500/training_args.bin +3 -0
  41. checkpoint-500/vocab.json +0 -0
  42. config.json +36 -0
  43. merges.txt +0 -0
  44. pytorch_model.bin +3 -0
  45. special_tokens_map.json +1 -0
  46. tokenizer_config.json +1 -0
  47. train_results.json +14 -0
  48. trainer_state.json +47 -0
  49. training_args.bin +3 -0
  50. vocab.json +0 -0
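
Each checkpoint-* directory above is a self-contained Trainer snapshot: model weights (pytorch_model.bin), optimizer and LR-scheduler state (optimizer.pt, scheduler.pt), the tokenizer files, and a trainer_state.json log, so training can be resumed from any of them. A minimal sketch of resuming, assuming transformers 4.5.0 (the version recorded in config.json) and omitting the dataset/collator wiring, which is not part of this commit:

```python
# Hedged sketch: resume fine-tuning from the step-2000 snapshot.
# train_dataset and data_collator are placeholders; the data used
# for the original run is not included in this repo.
from transformers import GPT2LMHeadModel, Trainer, TrainingArguments

model = GPT2LMHeadModel.from_pretrained("checkpoint-2000")
args = TrainingArguments(output_dir="out", num_train_epochs=3)
trainer = Trainer(model=model, args=args)  # plus train_dataset=..., data_collator=...
trainer.train(resume_from_checkpoint="checkpoint-2000")
```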
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 3.0,
+   "init_mem_cpu_alloc_delta": 1013907456,
+   "init_mem_cpu_peaked_delta": 153968640,
+   "init_mem_gpu_alloc_delta": 511148032,
+   "init_mem_gpu_peaked_delta": 0,
+   "train_mem_cpu_alloc_delta": 184762368,
+   "train_mem_cpu_peaked_delta": 463081472,
+   "train_mem_gpu_alloc_delta": 1500642816,
+   "train_mem_gpu_peaked_delta": 7988768768,
+   "train_runtime": 760.1324,
+   "train_samples": 1453,
+   "train_samples_per_second": 2.869
+ }
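
One note on the numbers above: the reported train_samples_per_second appears to count optimizer steps, not samples; 2181 total steps (max_steps in trainer_state.json below) over 760.1324 s gives exactly 2.869, while 1453 samples over 3 epochs in the same runtime would be about 5.7 per second. A quick check:

```python
# Recompute the throughput figure from all_results.json.
train_runtime = 760.1324
max_steps = 2181          # from trainer_state.json
train_samples, epochs = 1453, 3.0

print(round(max_steps / train_runtime, 3))               # 2.869 -> matches the report
print(round(train_samples * epochs / train_runtime, 3))  # 5.735 -> does not
```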
checkpoint-1000/config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "resid_pdrop": 0.1,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "transformers_version": "4.5.0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce25b245e78a1ad1ece29de8607a72df0a974ecf5ae420528395ec4020d3d699
+ size 995611287
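
The .pt/.bin entries in this commit are Git LFS pointers rather than the binaries themselves: a spec version line, the SHA-256 of the stored object, and its size in bytes (here roughly 995 MB of optimizer state). A minimal, runnable sketch of reading one:

```python
# Parse a Git LFS pointer (spec v1) like the checkpoint-1000/optimizer.pt
# entry above; the actual binary lives in LFS storage, not in git history.
def parse_lfs_pointer(text: str) -> dict:
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:ce25b245e78a1ad1ece29de8607a72df0a974ecf5ae420528395ec4020d3d699\n"
    "size 995611287\n"
)
info = parse_lfs_pointer(pointer)
print(round(int(info["size"]) / 1e6, 1), "MB")  # 995.6 MB
```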
checkpoint-1000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3c88702af61c00ad4d4c504a62ebce13c9acbc91b653414d81e666f2ed99666
+ size 510408315
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:424c1ea0fbf31ea8aab2aa5fc6b7aa0588f26270bde42a4c84f6c479df43ac35
+ size 559
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2"}
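
The tokenizer files match the stock GPT-2 tokenizer: a single <|endoftext|> token (id 50256, consistent with bos_token_id/eos_token_id in config.json) serves as bos, eos, and unk, with model_max_length 1024. A small check, assuming the checkpoint directory is available locally:

```python
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("checkpoint-1000")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <|endoftext|> three times
print(tok.eos_token_id, tok.model_max_length)       # 50256 1024
```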
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.375515818431912,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.69,
+       "learning_rate": 3.853736817973407e-05,
+       "loss": 3.1317,
+       "step": 500
+     },
+     {
+       "epoch": 1.38,
+       "learning_rate": 2.7074736359468134e-05,
+       "loss": 2.8754,
+       "step": 1000
+     }
+   ],
+   "max_steps": 2181,
+   "num_train_epochs": 3,
+   "total_flos": 1528351802523648.0,
+   "trial_name": null,
+   "trial_params": null
+ }
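
The fractional "epoch" values in these snapshots are simply global_step divided by steps per epoch, where steps per epoch = max_steps / num_train_epochs = 2181 / 3 = 727:

```python
steps_per_epoch = 2181 / 3  # max_steps / num_train_epochs = 727
for step in (500, 1000, 1500, 2000):
    print(step, step / steps_per_epoch)
# 500  -> 0.6877... (checkpoint-500)
# 1000 -> 1.3755... (the "epoch" value above)
# 1500 -> 2.0632... (checkpoint-1500)
# 2000 -> 2.7510... (checkpoint-2000)
```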
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25f33abff4fb70cec388fc6432fae8a00c87f1ed1cee32c138fdaf1b2bc37665
+ size 2351
checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "resid_pdrop": 0.1,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "transformers_version": "4.5.0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-1500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:515be2e029e5b67450760aa466ef2fc757c00b1fae3975cb50ccd706bda4dc8b
+ size 995611287
checkpoint-1500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf45e0165327c41ab9aa1d3f9158bea39dc8ed02692ba63087898ab6d11659d2
+ size 510408315
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2ced28b2f2640ca79c1dfd3e2043b92dcf461cd5754dfc4a788a857555af423
+ size 559
checkpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2"}
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.063273727647868,
+   "global_step": 1500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.69,
+       "learning_rate": 3.853736817973407e-05,
+       "loss": 3.1317,
+       "step": 500
+     },
+     {
+       "epoch": 1.38,
+       "learning_rate": 2.7074736359468134e-05,
+       "loss": 2.8754,
+       "step": 1000
+     },
+     {
+       "epoch": 2.06,
+       "learning_rate": 1.56121045392022e-05,
+       "loss": 2.7749,
+       "step": 1500
+     }
+   ],
+   "max_steps": 2181,
+   "num_train_epochs": 3,
+   "total_flos": 2292145424695296.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25f33abff4fb70cec388fc6432fae8a00c87f1ed1cee32c138fdaf1b2bc37665
+ size 2351
checkpoint-1500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "resid_pdrop": 0.1,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "transformers_version": "4.5.0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-2000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb81f346b9687b4cec2c55fbf796c24372b89ce6c878edb8de9d1e5992fe80ff
+ size 995611287
checkpoint-2000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b39d4eb863d9f7472bdd9a6f5536d36ddb576fc03235c88e837425cc894a74d
+ size 510408315
checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:744d9451ef662d8d4c63313ea8658575d994832c4eee5da4182e3f5eb2807835
+ size 559
checkpoint-2000/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
checkpoint-2000/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2"}
checkpoint-2000/trainer_state.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.751031636863824,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.69,
+       "learning_rate": 3.853736817973407e-05,
+       "loss": 3.1317,
+       "step": 500
+     },
+     {
+       "epoch": 1.38,
+       "learning_rate": 2.7074736359468134e-05,
+       "loss": 2.8754,
+       "step": 1000
+     },
+     {
+       "epoch": 2.06,
+       "learning_rate": 1.56121045392022e-05,
+       "loss": 2.7749,
+       "step": 1500
+     },
+     {
+       "epoch": 2.75,
+       "learning_rate": 4.149472718936268e-06,
+       "loss": 2.6897,
+       "step": 2000
+     }
+   ],
+   "max_steps": 2181,
+   "num_train_epochs": 3,
+   "total_flos": 3056703605047296.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25f33abff4fb70cec388fc6432fae8a00c87f1ed1cee32c138fdaf1b2bc37665
+ size 2351
checkpoint-2000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-500/config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "resid_pdrop": 0.1,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "transformers_version": "4.5.0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
checkpoint-500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5eae7f5229a3fac5a868c53501084c38a899457e57a3578c3e49c11e518796c0
+ size 995611287
checkpoint-500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:056455c41661f68666d01c56dcaabb36089a70bbeb6e3b1fedb45a40b7ed9a27
+ size 510408315
checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b60e1b6045b4b67dfc4baeb67b1279882c28f943ee655b87a17db3e37f2d52d
+ size 559
checkpoint-500/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
checkpoint-500/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2"}
checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.687757909215956,
+   "global_step": 500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.69,
+       "learning_rate": 3.853736817973407e-05,
+       "loss": 3.1317,
+       "step": 500
+     }
+   ],
+   "max_steps": 2181,
+   "num_train_epochs": 3,
+   "total_flos": 764558180352000.0,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25f33abff4fb70cec388fc6432fae8a00c87f1ed1cee32c138fdaf1b2bc37665
+ size 2351
checkpoint-500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "gpt2",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "resid_pdrop": 0.1,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "transformers_version": "4.5.0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
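
The root-level files are the final export after all 2181 steps (epoch 3.0). A minimal loading sketch, assuming a local clone of this repository (a Hub model id would work the same way); the sampling arguments mirror task_specific_params above:

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer

model = GPT2LMHeadModel.from_pretrained(".")   # "." = local clone of this repo
tokenizer = GPT2Tokenizer.from_pretrained(".")

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,                       # from task_specific_params
    max_length=50,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 defines no pad token
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```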
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a3a172fc80ba8ea839189962cc329b194a76b5ba74b97ffc7ff094ebfe8272a
+ size 510408315
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2"}
train_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 3.0,
+   "init_mem_cpu_alloc_delta": 1013907456,
+   "init_mem_cpu_peaked_delta": 153968640,
+   "init_mem_gpu_alloc_delta": 511148032,
+   "init_mem_gpu_peaked_delta": 0,
+   "train_mem_cpu_alloc_delta": 184762368,
+   "train_mem_cpu_peaked_delta": 463081472,
+   "train_mem_gpu_alloc_delta": 1500642816,
+   "train_mem_gpu_peaked_delta": 7988768768,
+   "train_runtime": 760.1324,
+   "train_samples": 1453,
+   "train_samples_per_second": 2.869
+ }
trainer_state.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.0,
+   "global_step": 2181,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.69,
+       "learning_rate": 3.853736817973407e-05,
+       "loss": 3.1317,
+       "step": 500
+     },
+     {
+       "epoch": 1.38,
+       "learning_rate": 2.7074736359468134e-05,
+       "loss": 2.8754,
+       "step": 1000
+     },
+     {
+       "epoch": 2.06,
+       "learning_rate": 1.56121045392022e-05,
+       "loss": 2.7749,
+       "step": 1500
+     },
+     {
+       "epoch": 2.75,
+       "learning_rate": 4.149472718936268e-06,
+       "loss": 2.6897,
+       "step": 2000
+     },
+     {
+       "epoch": 3.0,
+       "step": 2181,
+       "total_flos": 3332709108154368.0,
+       "train_runtime": 760.1324,
+       "train_samples_per_second": 2.869
+     }
+   ],
+   "max_steps": 2181,
+   "num_train_epochs": 3,
+   "total_flos": 3332709108154368.0,
+   "trial_name": null,
+   "trial_params": null
+ }
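
log_history above shows the training loss falling steadily (3.1317 -> 2.8754 -> 2.7749 -> 2.6897 across steps 500-2000); the last entry records runtime stats instead of a loss. A runnable sketch for extracting the curve:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # skip the final runtime-stats entry
        print(f"step {entry['step']:>4}  loss {entry['loss']:.4f}")
```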
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25f33abff4fb70cec388fc6432fae8a00c87f1ed1cee32c138fdaf1b2bc37665
+ size 2351
vocab.json ADDED
The diff for this file is too large to render. See raw diff