tyzhu committed (verified)
Commit 25c2962 · Parent: 5dbac27

Training in progress, epoch 1, checkpoint

checkpoint-131/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<|endoftext|>": 50256
+ }
checkpoint-131/config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_name_or_path": "gpt2-xl",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "do_sample": true,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "max_length": 50,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 1600,
+   "n_head": 25,
+   "n_inner": null,
+   "n_layer": 48,
+   "n_positions": 1024,
+   "output_past": true,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
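These values (n_layer = 48, n_embd = 1600, n_head = 25, vocab_size = 50257) are the stock GPT-2 XL configuration, so the checkpoint is a full ~1.5B-parameter fine-tune rather than an adapter. A minimal sketch of loading it, assuming the checkpoint directory has been downloaded locally (the path is illustrative):

```python
from transformers import AutoModelForCausalLM

# Illustrative local path to the downloaded checkpoint directory.
model = AutoModelForCausalLM.from_pretrained("checkpoint-131")

# GPT-2 XL: 48 layers x 1600 hidden dims -> roughly 1.5B parameters.
print(f"{sum(p.numel() for p in model.parameters()):,}")
```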
checkpoint-131/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "do_sample": true,
+   "eos_token_id": 50256,
+   "max_length": 50,
+   "transformers_version": "4.34.0"
+ }
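These defaults are picked up automatically by generate(): with do_sample=True and max_length=50, the model samples until 50 tokens unless the caller overrides them. A small sketch, again assuming a local copy of the checkpoint:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "checkpoint-131"  # illustrative local path
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)

inputs = tokenizer("Once upon a time", return_tensors="pt")
# do_sample=True and max_length=50 come from generation_config.json
# unless overridden as keyword arguments here.
out = model.generate(**inputs)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```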
checkpoint-131/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-131/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c40c7f8216668320e8b2129913f2a75cd29261740e8304ed5ef39e794d97a1f
+ size 12461385454
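The binary entries in this commit (optimizer.pt, pytorch_model.bin, rng_state.pth, scheduler.pt, training_args.bin) are committed as Git LFS pointer files: the repo stores only a sha256 oid and byte size, while the payload (here ~12.5 GB of optimizer state, consistent with two fp32 moment buffers for a ~1.56B-parameter model) lives on the LFS server. A stdlib-only sketch for checking a downloaded blob against its pointer:

```python
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream the file so multi-GB checkpoints need not fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# Expected oid copied from the LFS pointer above.
expected = "7c40c7f8216668320e8b2129913f2a75cd29261740e8304ed5ef39e794d97a1f"
assert sha256_of("checkpoint-131/optimizer.pt") == expected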
checkpoint-131/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b791f07606897d5733883699bbc100cb29d0c2db4a76de5df9b3e4f7090dd7b
+ size 6230637102
checkpoint-131/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8bfe6a48cc13927f26662925c9442577e8289049bfef5719ae4e8174e0c28519
+ size 14244
checkpoint-131/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db47f01886c74ba21a851df2df16fddc4065dfa635b61f55deeee88b130eaeea
+ size 1064
checkpoint-131/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-131/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-131/tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
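One detail worth noting: GPT-2 ships without a dedicated padding token, so this checkpoint reuses <|endoftext|> (id 50256) for bos/eos/pad/unk alike, as special_tokens_map.json above shows. A quick sketch of loading and sanity-checking the tokenizer (local path again illustrative):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-131")
assert tokenizer.pad_token == tokenizer.eos_token == "<|endoftext|>"
assert tokenizer.model_max_length == 1024  # matches n_positions in config.json
```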
checkpoint-131/trainer_state.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 66,
+   "global_step": 131,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.11,
+       "learning_rate": 3e-05,
+       "loss": 3.7331,
+       "step": 14
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 3e-05,
+       "loss": 2.345,
+       "step": 28
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 3e-05,
+       "loss": 2.1491,
+       "step": 42
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 3e-05,
+       "loss": 2.0739,
+       "step": 56
+     },
+     {
+       "epoch": 0.5,
+       "eval_accuracy": 0.6206453178068898,
+       "eval_loss": 1.8404711484909058,
+       "eval_runtime": 11.9883,
+       "eval_samples_per_second": 25.024,
+       "eval_steps_per_second": 1.585,
+       "step": 66
+     },
+     {
+       "epoch": 0.5,
+       "eval_exact_match": 7.666666666666667,
+       "eval_f1": 10.221428571428572,
+       "eval_qa_bleu": 1.4042262195131967,
+       "eval_qa_exact_match": 0.07333333333333333,
+       "eval_recite_bleu": 8.5956480576491,
+       "eval_recite_exact_match": 0.0,
+       "step": 66
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 3e-05,
+       "loss": 1.9722,
+       "step": 70
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 3e-05,
+       "loss": 2.014,
+       "step": 84
+     },
+     {
+       "epoch": 0.75,
+       "learning_rate": 3e-05,
+       "loss": 1.9812,
+       "step": 98
+     },
+     {
+       "epoch": 0.85,
+       "learning_rate": 3e-05,
+       "loss": 1.9213,
+       "step": 112
+     },
+     {
+       "epoch": 0.96,
+       "learning_rate": 3e-05,
+       "loss": 1.8806,
+       "step": 126
+     }
+   ],
+   "logging_steps": 14,
+   "max_steps": 1310,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 5898044000563200.0,
+   "trial_name": null,
+   "trial_params": null
+ }
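For orientation: global_step is 131 at epoch 1.0, i.e. 131 optimizer steps per epoch, and max_steps = 131 × 10 = 1310 agrees with num_train_epochs = 10; training loss fell from 3.73 to 1.88 over this first epoch. A sketch of resuming the run with the Trainer API, where train_ds and eval_ds are hypothetical stand-ins for the author's actual datasets and the arguments echo only what trainer_state.json records:

```python
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments

# Hypothetical reconstruction of the original run; train_ds / eval_ds are
# placeholders for whatever datasets the author actually used.
args = TrainingArguments(
    output_dir="out",
    num_train_epochs=10,          # matches num_train_epochs above
    learning_rate=3e-05,          # matches the logged learning_rate
    logging_steps=14,             # matches logging_steps above
    evaluation_strategy="steps",
    eval_steps=66,                # matches eval_steps above
    save_steps=500,               # matches save_steps above
)
model = AutoModelForCausalLM.from_pretrained("gpt2-xl")
trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
# Restores weights, optimizer, LR scheduler, and RNG state from the files in this commit.
trainer.train(resume_from_checkpoint="checkpoint-131")
```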
checkpoint-131/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e2f5bbca2ac6551a76ff4a6e10000fae190ff8cb3817e74f2e51a104a179e48
+ size 4728
checkpoint-131/vocab.json ADDED
The diff for this file is too large to render. See raw diff