tyzhu committed · b2e4b95 · verified · 1 parent: 58a57a0

Training in progress, epoch 1, checkpoint

checkpoint-2179/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<|endoftext|>": 50256
+}
checkpoint-2179/config.json ADDED
@@ -0,0 +1,42 @@
+{
+  "_name_or_path": "gpt2-xl",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "do_sample": true,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "max_length": 50,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 1600,
+  "n_head": 25,
+  "n_inner": null,
+  "n_layer": 48,
+  "n_positions": 1024,
+  "output_past": true,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.34.0",
+  "use_cache": true,
+  "vocab_size": 50257
+}
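
The config above is a stock gpt2-xl setup (48 layers, 1600-dim embeddings, 25 heads) saved mid-training in fp32. A minimal sketch of loading this checkpoint with the transformers library; the commit page does not show the full repository name, so the repo id below is a placeholder:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id: substitute the actual Hub repository this commit belongs to.
REPO_ID = "tyzhu/<repo-name>"

# `subfolder` targets the checkpoint directory added in this commit.
tokenizer = AutoTokenizer.from_pretrained(REPO_ID, subfolder="checkpoint-2179")
model = AutoModelForCausalLM.from_pretrained(REPO_ID, subfolder="checkpoint-2179")
```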
checkpoint-2179/generation_config.json ADDED
@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "do_sample": true,
+  "eos_token_id": 50256,
+  "max_length": 50,
+  "transformers_version": "4.34.0"
+}
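
generation_config.json pins the inference defaults (sampling enabled, max_length 50) copied out of the model config. A minimal sketch of how `generate()` would pick these up, reusing the model and tokenizer loaded above; the prompt is illustrative only:

```python
import torch

prompt = "Question: Who wrote Hamlet?\nAnswer:"  # illustrative prompt, not from the repo
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    # With no explicit arguments, generate() falls back to the checkpoint's
    # generation_config.json: do_sample=True, max_length=50, bos/eos = 50256.
    output_ids = model.generate(**inputs)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```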
checkpoint-2179/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2179/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1338e3565f5f702734284d02a1f41349d8d56f643a07b74e05387a3a28d63f0
+size 12461385454
checkpoint-2179/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7b227b4ab7d103c4053898fbf106f8301673cc571bc47af662d6c75518b9ae2
+size 6230637102
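
optimizer.pt (~12.5 GB) and pytorch_model.bin (~6.2 GB) are committed as Git LFS pointer files: the repository stores only the sha256 oid and byte size, while the blob itself lives in LFS storage. The optimizer file being roughly twice the fp32 model size is consistent with an Adam-style optimizer keeping two fp32 moment buffers per parameter. A minimal sketch of fetching the weights directly, using the same placeholder repo id as above:

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id; `revision` pins the commit shown on this page.
weights_path = hf_hub_download(
    repo_id="tyzhu/<repo-name>",
    filename="checkpoint-2179/pytorch_model.bin",
    revision="b2e4b95",
)
```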
checkpoint-2179/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cafb16e462402552eac29150859f8a417610feab6a9c93e608d7d6d1ffc4110
+size 14244
checkpoint-2179/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60a2a752213510c48d9e94703beafbaa2c6ca94bf2a0b51f9bcdac5d03e93cb6
+size 1064
checkpoint-2179/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
checkpoint-2179/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2179/tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
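
special_tokens_map.json and tokenizer_config.json reuse <|endoftext|> (id 50256) for every special role, since GPT-2 ships no dedicated pad token and padding is needed for batched training. A minimal sketch of reproducing that setup on a fresh tokenizer:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2-xl")
tok.pad_token = tok.eos_token  # <|endoftext|>, id 50256, as in this checkpoint
assert tok.pad_token_id == 50256
```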
checkpoint-2179/trainer_state.json ADDED
@@ -0,0 +1,62 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.0,
+  "eval_steps": 2179,
+  "global_step": 2179,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "learning_rate": 3e-05,
+      "loss": 2.2464,
+      "step": 436
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 3e-05,
+      "loss": 2.0544,
+      "step": 872
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 3e-05,
+      "loss": 1.9968,
+      "step": 1308
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 3e-05,
+      "loss": 1.9537,
+      "step": 1744
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6693020521036767,
+      "eval_loss": 1.5623117685317993,
+      "eval_runtime": 220.2436,
+      "eval_samples_per_second": 33.622,
+      "eval_steps_per_second": 2.102,
+      "step": 2179
+    },
+    {
+      "epoch": 1.0,
+      "eval_exact_match": 10.24983119513842,
+      "eval_f1": 15.257097040945366,
+      "eval_qa_bleu": 5.8382901471491575,
+      "eval_qa_exact_match": 0.09736664415935178,
+      "eval_recite_bleu": 17.0930417063702,
+      "eval_recite_exact_match": 0.0,
+      "step": 2179
+    }
+  ],
+  "logging_steps": 436,
+  "max_steps": 43580,
+  "num_train_epochs": 20,
+  "save_steps": 500,
+  "total_flos": 1.00774010674176e+17,
+  "trial_name": null,
+  "trial_params": null
+}
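
trainer_state.json records one finished epoch: global_step 2179 of max_steps 43580 (20 epochs at 2179 steps each), training loss falling from 2.25 to 1.95, and an eval pass at the epoch boundary (eval_loss 1.56, eval_accuracy 0.67). A minimal sketch of resuming the run from this directory, assuming `trainer` was rebuilt with the original model, datasets, and TrainingArguments, none of which are shown in this commit:

```python
# The Trainer restores optimizer.pt, scheduler.pt, rng_state.pth, and
# trainer_state.json from the folder, then continues at global_step 2179.
trainer.train(resume_from_checkpoint="checkpoint-2179")
```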
checkpoint-2179/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3154d552425d8cb1b29494ee35253674917cf60979727c261838069c8f542789
+size 4728
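
Unlike the JSON files above, training_args.bin is a pickled TrainingArguments object, so it is inspected by unpickling rather than read as text. A minimal sketch, assuming a local copy of the file and a transformers install matching the one that wrote it:

```python
import torch

# Pickled TrainingArguments; weights_only=False is required on newer torch
# versions because this is an arbitrary Python object, not a tensor file.
args = torch.load("checkpoint-2179/training_args.bin", weights_only=False)
print(args.num_train_epochs, args.logging_steps, args.save_steps)
```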
checkpoint-2179/vocab.json ADDED
The diff for this file is too large to render. See raw diff