pszemraj committed on
Commit
f702114
1 Parent(s): 8e679c6

Create new file

Browse files
Files changed (1) hide show
  1. training_metadata.json +114 -0
training_metadata.json ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "output_dir": "/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-base-16384-booksum-V11-ft25-booksum",
  "overwrite_output_dir": true,
  "do_train": false,
  "do_eval": false,
  "do_predict": false,
  "evaluation_strategy": "no",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 2,
  "per_device_eval_batch_size": 1,
  "per_gpu_train_batch_size": null,
  "per_gpu_eval_batch_size": null,
  "gradient_accumulation_steps": 32,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "learning_rate": 0.001,
  "weight_decay": 0,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1,
  "num_train_epochs": 1,
  "max_steps": -1,
  "lr_scheduler_type": "constant_with_warmup",
  "warmup_ratio": 0.03,
  "warmup_steps": 0,
  "log_level": -1,
  "log_level_replica": -1,
  "log_on_each_node": true,
  "logging_dir": "/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-base-16384-booksum-V11-ft25-booksum/logs",
  "logging_strategy": "steps",
  "logging_first_step": false,
  "logging_steps": 2,
  "logging_nan_inf_filter": true,
  "save_strategy": "steps",
  "save_steps": 25,
  "save_total_limit": 1,
  "save_on_each_node": false,
  "no_cuda": false,
  "use_mps_device": false,
  "seed": 42,
  "data_seed": null,
  "jit_mode_eval": false,
  "use_ipex": false,
  "bf16": false,
  "fp16": false,
  "fp16_opt_level": "O1",
  "half_precision_backend": "auto",
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": null,
  "local_rank": 0,
  "xpu_backend": null,
  "tpu_num_cores": null,
  "tpu_metrics_debug": false,
  "debug": [],
  "dataloader_drop_last": false,
  "eval_steps": null,
  "dataloader_num_workers": 0,
  "past_index": -1,
  "run_name": "/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-base-16384-booksum-V11-ft25-booksum",
  "disable_tqdm": false,
  "remove_unused_columns": true,
  "label_names": null,
  "load_best_model_at_end": false,
  "metric_for_best_model": null,
  "greater_is_better": null,
  "ignore_data_skip": false,
  "sharded_ddp": [],
  "fsdp": [],
  "fsdp_min_num_params": 0,
  "fsdp_transformer_layer_cls_to_wrap": null,
  "deepspeed": null,
  "label_smoothing_factor": 0.0,
  "optim": "adamw_hf",
  "adafactor": false,
  "group_by_length": false,
  "length_column_name": "length",
  "report_to": ["tensorboard"],
  "ddp_find_unused_parameters": null,
  "ddp_bucket_cap_mb": null,
  "dataloader_pin_memory": true,
  "skip_memory_metrics": false,
  "use_legacy_prediction_loop": false,
  "push_to_hub": true,
  "resume_from_checkpoint": null,
  "hub_model_id": "long-t5-tglobal-base-16384-booksum-V11-ft25-booksum",
  "hub_strategy": "end",
  "hub_token": "<HUB_TOKEN>",
  "hub_private_repo": true,
  "gradient_checkpointing": true,
  "include_inputs_for_metrics": false,
  "fp16_backend": "auto",
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>",
  "_n_gpu": 1,
  "mp_parameters": "",
  "auto_find_batch_size": false,
  "full_determinism": false,
  "torchdynamo": null,
  "ray_scope": "last",
  "ddp_timeout": 1800,
  "sortish_sampler": false,
  "predict_with_generate": false,
  "generation_max_length": null,
  "generation_num_beams": null,
  "train_batch_size": 2,
  "eval_batch_size": 1,
  "configs_src": "long-t5-tglobal-base-16384-booksum-V11-ft25-booksum",
  "use_adam8bit": false,
  "use_adan_optim": false,
  "optim_params": "(Adafactor (\nParameter Group 0\n beta1: None\n clip_threshold: 1.0\n decay_rate: -0.8\n eps: (1e-30, 0.001)\n lr: 0.0\n relative_step: True\n scale_parameter: True\n warmup_init: True\n weight_decay: 0.0\n), <transformers.optimization.AdafactorSchedule object at 0x7f6ab0187890>)"
}