Upload running_log.txt with huggingface_hub
running_log.txt
ADDED
[INFO|2025-02-17 12:12:13] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/config.json

[INFO|2025-02-17 12:12:13] configuration_utils.py:768 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-1.5B",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 1536,
  "initializer_range": 0.02,
  "intermediate_size": 8960,
  "max_position_embeddings": 131072,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 12,
  "num_hidden_layers": 28,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.2",
  "use_cache": true,
  "use_mrope": false,
  "use_sliding_window": false,
  "vocab_size": 151936
}


[INFO|2025-02-17 12:12:17] tokenization_utils_base.py:2034 >> loading file vocab.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/vocab.json

[INFO|2025-02-17 12:12:17] tokenization_utils_base.py:2034 >> loading file merges.txt from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/merges.txt

[INFO|2025-02-17 12:12:17] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/tokenizer.json

[INFO|2025-02-17 12:12:17] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None

[INFO|2025-02-17 12:12:17] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at None

[INFO|2025-02-17 12:12:17] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/tokenizer_config.json

[INFO|2025-02-17 12:12:17] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None

[INFO|2025-02-17 12:12:18] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.

[INFO|2025-02-17 12:12:19] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/config.json

[INFO|2025-02-17 12:12:19] configuration_utils.py:768 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-1.5B",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 1536,
  "initializer_range": 0.02,
  "intermediate_size": 8960,
  "max_position_embeddings": 131072,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 12,
  "num_hidden_layers": 28,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.2",
  "use_cache": true,
  "use_mrope": false,
  "use_sliding_window": false,
  "vocab_size": 151936
}


[INFO|2025-02-17 12:12:19] tokenization_utils_base.py:2034 >> loading file vocab.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/vocab.json

[INFO|2025-02-17 12:12:19] tokenization_utils_base.py:2034 >> loading file merges.txt from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/merges.txt

[INFO|2025-02-17 12:12:19] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/tokenizer.json

[INFO|2025-02-17 12:12:19] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None

[INFO|2025-02-17 12:12:19] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at None

[INFO|2025-02-17 12:12:19] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/tokenizer_config.json

[INFO|2025-02-17 12:12:19] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None

[INFO|2025-02-17 12:12:19] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.

[INFO|2025-02-17 12:12:19] logging.py:157 >> Loading dataset limo.json...

[INFO|2025-02-17 12:12:27] configuration_utils.py:696 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/config.json

[INFO|2025-02-17 12:12:27] configuration_utils.py:768 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-1.5B",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 1536,
  "initializer_range": 0.02,
  "intermediate_size": 8960,
  "max_position_embeddings": 131072,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 12,
  "num_hidden_layers": 28,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.2",
  "use_cache": true,
  "use_mrope": false,
  "use_sliding_window": false,
  "vocab_size": 151936
}


[INFO|2025-02-17 12:13:41] modeling_utils.py:3904 >> loading weights file model.safetensors from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/model.safetensors

[INFO|2025-02-17 12:13:41] modeling_utils.py:1582 >> Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.

[INFO|2025-02-17 12:13:41] configuration_utils.py:1140 >> Generate config GenerationConfig {
  "bos_token_id": 151643,
  "eos_token_id": 151643
}


[INFO|2025-02-17 12:13:43] modeling_utils.py:4888 >> All model checkpoint weights were used when initializing Qwen2ForCausalLM.


[INFO|2025-02-17 12:13:43] modeling_utils.py:4896 >> All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at Qwen/Qwen2.5-1.5B.
If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
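
The config, tokenizer, and weight loads recorded above follow the standard transformers loading path for Qwen/Qwen2.5-1.5B. A minimal sketch that reproduces the same loads outside the training framework (assuming transformers 4.48.2, as logged; model id and dtype are taken from the log):

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen2.5-1.5B"

config = AutoConfig.from_pretrained(model_id)        # config.json, as dumped above
tokenizer = AutoTokenizer.from_pretrained(model_id)  # vocab.json, merges.txt, tokenizer.json
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # the log instantiates Qwen2ForCausalLM under torch.bfloat16
)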

[INFO|2025-02-17 12:13:43] configuration_utils.py:1095 >> loading configuration file generation_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-1.5B/snapshots/8faed761d45a263340a0528343f099c05c9a4323/generation_config.json

[INFO|2025-02-17 12:13:43] configuration_utils.py:1140 >> Generate config GenerationConfig {
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "max_new_tokens": 2048
}


[INFO|2025-02-17 12:13:43] logging.py:157 >> Gradient checkpointing enabled.

[INFO|2025-02-17 12:13:43] logging.py:157 >> Using torch SDPA for faster training and inference.

[INFO|2025-02-17 12:13:43] logging.py:157 >> Upcasting trainable params to float32.

[INFO|2025-02-17 12:13:43] logging.py:157 >> Fine-tuning method: Full

[INFO|2025-02-17 12:13:43] logging.py:157 >> trainable params: 1,543,714,304 || all params: 1,543,714,304 || trainable%: 100.0000
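
The trainable-parameter count is consistent with the Qwen2Config dumped earlier (hidden size 1536, 28 layers, intermediate size 8960, vocab 151936, 12 query heads and 2 KV heads with QKV bias, tied embeddings). An illustrative back-of-the-envelope check, not produced by the run itself:

# Rough parameter count from the logged config values.
h, n_layers, inter, vocab = 1536, 28, 8960, 151936
kv_dim = 2 * (h // 12)                                   # 2 KV heads x head_dim 128 = 256

embed = vocab * h                                        # input embeddings (LM head is tied)
attn  = (h * h + h) + 2 * (h * kv_dim + kv_dim) + h * h  # q/k/v projections with bias + o_proj
mlp   = 3 * h * inter                                    # gate, up and down projections
norms = 2 * h                                            # two RMSNorms per layer

total = embed + n_layers * (attn + mlp + norms) + h      # plus the final RMSNorm
print(f"{total:,}")                                      # 1,543,714,304, matching the log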

[INFO|2025-02-17 12:13:43] trainer.py:741 >> Using auto half precision backend

[INFO|2025-02-17 12:13:44] trainer.py:2369 >> ***** Running training *****

[INFO|2025-02-17 12:13:44] trainer.py:2370 >> Num examples = 817

[INFO|2025-02-17 12:13:44] trainer.py:2371 >> Num Epochs = 15

[INFO|2025-02-17 12:13:44] trainer.py:2372 >> Instantaneous batch size per device = 2

[INFO|2025-02-17 12:13:44] trainer.py:2375 >> Total train batch size (w. parallel, distributed & accumulation) = 16

[INFO|2025-02-17 12:13:44] trainer.py:2376 >> Gradient Accumulation steps = 8

[INFO|2025-02-17 12:13:44] trainer.py:2377 >> Total optimization steps = 765

[INFO|2025-02-17 12:13:44] trainer.py:2378 >> Number of trainable parameters = 1,543,714,304

[INFO|2025-02-17 12:13:44] integration_utils.py:817 >> Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"
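
The step count follows from the settings above: 817 examples at a per-device batch of 2 give 409 dataloader batches per epoch, 409 // 8 gradient-accumulation steps give 51 optimizer updates per epoch, and 51 updates over 15 epochs give 765. A small sketch of that arithmetic (assuming a single device, which the total batch size of 16 = 2 x 8 implies):

import math

num_examples, per_device_bs, grad_accum, num_epochs, num_devices = 817, 2, 8, 15, 1

batches_per_epoch = math.ceil(num_examples / (per_device_bs * num_devices))  # 409
updates_per_epoch = batches_per_epoch // grad_accum                          # 51
total_steps = updates_per_epoch * num_epochs                                 # 765, as logged
effective_batch = per_device_bs * grad_accum * num_devices                   # 16, as logged
print(total_steps, effective_batch)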

[INFO|2025-02-17 12:14:20] logging.py:157 >> {'loss': 0.8739, 'learning_rate': 1.9998e-05, 'epoch': 0.10, 'throughput': 8024.43}

[INFO|2025-02-17 12:14:40] logging.py:157 >> {'loss': 0.7957, 'learning_rate': 1.9992e-05, 'epoch': 0.20, 'throughput': 8106.29}

[INFO|2025-02-17 12:14:59] logging.py:157 >> {'loss': 0.7822, 'learning_rate': 1.9981e-05, 'epoch': 0.29, 'throughput': 8219.61}

[INFO|2025-02-17 12:15:19] logging.py:157 >> {'loss': 0.7815, 'learning_rate': 1.9966e-05, 'epoch': 0.39, 'throughput': 8260.20}

[INFO|2025-02-17 12:15:38] logging.py:157 >> {'loss': 0.7432, 'learning_rate': 1.9947e-05, 'epoch': 0.49, 'throughput': 8269.83}

[INFO|2025-02-17 12:15:58] logging.py:157 >> {'loss': 0.7389, 'learning_rate': 1.9924e-05, 'epoch': 0.59, 'throughput': 8282.10}

[INFO|2025-02-17 12:16:17] logging.py:157 >> {'loss': 0.7654, 'learning_rate': 1.9897e-05, 'epoch': 0.68, 'throughput': 8305.86}

[INFO|2025-02-17 12:16:37] logging.py:157 >> {'loss': 0.7753, 'learning_rate': 1.9865e-05, 'epoch': 0.78, 'throughput': 8319.21}

[INFO|2025-02-17 12:16:56] logging.py:157 >> {'loss': 0.7256, 'learning_rate': 1.9830e-05, 'epoch': 0.88, 'throughput': 8321.93}

[INFO|2025-02-17 12:17:16] logging.py:157 >> {'loss': 0.7270, 'learning_rate': 1.9790e-05, 'epoch': 0.98, 'throughput': 8320.84}

[INFO|2025-02-17 12:17:32] logging.py:157 >> {'loss': 0.5820, 'learning_rate': 1.9746e-05, 'epoch': 1.06, 'throughput': 8319.31}

[INFO|2025-02-17 12:17:52] logging.py:157 >> {'loss': 0.5628, 'learning_rate': 1.9698e-05, 'epoch': 1.16, 'throughput': 8321.68}

[INFO|2025-02-17 12:18:11] logging.py:157 >> {'loss': 0.5561, 'learning_rate': 1.9646e-05, 'epoch': 1.25, 'throughput': 8326.47}

[INFO|2025-02-17 12:18:31] logging.py:157 >> {'loss': 0.5778, 'learning_rate': 1.9590e-05, 'epoch': 1.35, 'throughput': 8327.91}

[INFO|2025-02-17 12:18:51] logging.py:157 >> {'loss': 0.5207, 'learning_rate': 1.9529e-05, 'epoch': 1.45, 'throughput': 8328.77}

[INFO|2025-02-17 12:19:10] logging.py:157 >> {'loss': 0.5452, 'learning_rate': 1.9465e-05, 'epoch': 1.55, 'throughput': 8334.34}

[INFO|2025-02-17 12:19:29] logging.py:157 >> {'loss': 0.5379, 'learning_rate': 1.9397e-05, 'epoch': 1.65, 'throughput': 8341.18}

[INFO|2025-02-17 12:19:49] logging.py:157 >> {'loss': 0.5116, 'learning_rate': 1.9325e-05, 'epoch': 1.74, 'throughput': 8337.37}

[INFO|2025-02-17 12:20:09] logging.py:157 >> {'loss': 0.5532, 'learning_rate': 1.9249e-05, 'epoch': 1.84, 'throughput': 8339.42}

[INFO|2025-02-17 12:20:28] logging.py:157 >> {'loss': 0.5659, 'learning_rate': 1.9169e-05, 'epoch': 1.94, 'throughput': 8344.96}

[INFO|2025-02-17 12:20:28] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-100

[INFO|2025-02-17 12:20:28] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-100/config.json

[INFO|2025-02-17 12:20:28] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-100/generation_config.json

[INFO|2025-02-17 12:20:48] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-100/model.safetensors.index.json.

[INFO|2025-02-17 12:20:48] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-100/tokenizer_config.json

[INFO|2025-02-17 12:20:48] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-100/special_tokens_map.json

[INFO|2025-02-17 12:21:37] logging.py:157 >> {'loss': 0.6028, 'learning_rate': 1.9085e-05, 'epoch': 2.02, 'throughput': 7373.76}

[INFO|2025-02-17 12:21:57] logging.py:157 >> {'loss': 0.4228, 'learning_rate': 1.8997e-05, 'epoch': 2.12, 'throughput': 7412.32}

[INFO|2025-02-17 12:22:17] logging.py:157 >> {'loss': 0.3648, 'learning_rate': 1.8905e-05, 'epoch': 2.22, 'throughput': 7448.87}

[INFO|2025-02-17 12:22:36] logging.py:157 >> {'loss': 0.3822, 'learning_rate': 1.8810e-05, 'epoch': 2.31, 'throughput': 7486.42}

[INFO|2025-02-17 12:22:56] logging.py:157 >> {'loss': 0.3907, 'learning_rate': 1.8711e-05, 'epoch': 2.41, 'throughput': 7521.14}

[INFO|2025-02-17 12:23:15] logging.py:157 >> {'loss': 0.3936, 'learning_rate': 1.8608e-05, 'epoch': 2.51, 'throughput': 7551.36}

[INFO|2025-02-17 12:23:35] logging.py:157 >> {'loss': 0.3760, 'learning_rate': 1.8502e-05, 'epoch': 2.61, 'throughput': 7577.31}

[INFO|2025-02-17 12:23:55] logging.py:157 >> {'loss': 0.3735, 'learning_rate': 1.8392e-05, 'epoch': 2.70, 'throughput': 7602.53}

[INFO|2025-02-17 12:24:14] logging.py:157 >> {'loss': 0.4114, 'learning_rate': 1.8279e-05, 'epoch': 2.80, 'throughput': 7628.26}

[INFO|2025-02-17 12:24:33] logging.py:157 >> {'loss': 0.3696, 'learning_rate': 1.8162e-05, 'epoch': 2.90, 'throughput': 7652.37}

[INFO|2025-02-17 12:24:53] logging.py:157 >> {'loss': 0.3888, 'learning_rate': 1.8042e-05, 'epoch': 3.00, 'throughput': 7671.13}

[INFO|2025-02-17 12:25:09] logging.py:157 >> {'loss': 0.2605, 'learning_rate': 1.7918e-05, 'epoch': 3.08, 'throughput': 7688.94}

[INFO|2025-02-17 12:25:28] logging.py:157 >> {'loss': 0.2475, 'learning_rate': 1.7791e-05, 'epoch': 3.18, 'throughput': 7710.27}

[INFO|2025-02-17 12:25:48] logging.py:157 >> {'loss': 0.2344, 'learning_rate': 1.7660e-05, 'epoch': 3.27, 'throughput': 7726.87}

[INFO|2025-02-17 12:26:08] logging.py:157 >> {'loss': 0.2665, 'learning_rate': 1.7527e-05, 'epoch': 3.37, 'throughput': 7743.40}

[INFO|2025-02-17 12:26:27] logging.py:157 >> {'loss': 0.2477, 'learning_rate': 1.7390e-05, 'epoch': 3.47, 'throughput': 7760.52}

[INFO|2025-02-17 12:26:47] logging.py:157 >> {'loss': 0.2679, 'learning_rate': 1.7250e-05, 'epoch': 3.57, 'throughput': 7776.01}

[INFO|2025-02-17 12:27:07] logging.py:157 >> {'loss': 0.2340, 'learning_rate': 1.7107e-05, 'epoch': 3.67, 'throughput': 7790.11}

[INFO|2025-02-17 12:27:26] logging.py:157 >> {'loss': 0.2587, 'learning_rate': 1.6961e-05, 'epoch': 3.76, 'throughput': 7801.88}

[INFO|2025-02-17 12:27:46] logging.py:157 >> {'loss': 0.2606, 'learning_rate': 1.6812e-05, 'epoch': 3.86, 'throughput': 7815.59}

[INFO|2025-02-17 12:27:46] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-200

[INFO|2025-02-17 12:27:46] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-200/config.json

[INFO|2025-02-17 12:27:46] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-200/generation_config.json

[INFO|2025-02-17 12:28:07] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-200/model.safetensors.index.json.

[INFO|2025-02-17 12:28:07] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-200/tokenizer_config.json

[INFO|2025-02-17 12:28:07] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-200/special_tokens_map.json

[INFO|2025-02-17 12:29:01] logging.py:157 >> {'loss': 0.2543, 'learning_rate': 1.6661e-05, 'epoch': 3.96, 'throughput': 7347.53}

[INFO|2025-02-17 12:29:17] logging.py:157 >> {'loss': 0.1856, 'learning_rate': 1.6506e-05, 'epoch': 4.04, 'throughput': 7365.09}

[INFO|2025-02-17 12:29:36] logging.py:157 >> {'loss': 0.1462, 'learning_rate': 1.6349e-05, 'epoch': 4.14, 'throughput': 7387.60}

[INFO|2025-02-17 12:29:56] logging.py:157 >> {'loss': 0.1700, 'learning_rate': 1.6189e-05, 'epoch': 4.23, 'throughput': 7407.89}

[INFO|2025-02-17 12:30:15] logging.py:157 >> {'loss': 0.1427, 'learning_rate': 1.6026e-05, 'epoch': 4.33, 'throughput': 7426.73}

[INFO|2025-02-17 12:30:35] logging.py:157 >> {'loss': 0.1500, 'learning_rate': 1.5861e-05, 'epoch': 4.43, 'throughput': 7444.87}

[INFO|2025-02-17 12:30:54] logging.py:157 >> {'loss': 0.1603, 'learning_rate': 1.5694e-05, 'epoch': 4.53, 'throughput': 7464.06}

[INFO|2025-02-17 12:31:14] logging.py:157 >> {'loss': 0.1536, 'learning_rate': 1.5524e-05, 'epoch': 4.63, 'throughput': 7479.59}

[INFO|2025-02-17 12:31:34] logging.py:157 >> {'loss': 0.1415, 'learning_rate': 1.5351e-05, 'epoch': 4.72, 'throughput': 7495.78}

[INFO|2025-02-17 12:31:53] logging.py:157 >> {'loss': 0.1590, 'learning_rate': 1.5177e-05, 'epoch': 4.82, 'throughput': 7511.92}

[INFO|2025-02-17 12:32:13] logging.py:157 >> {'loss': 0.1699, 'learning_rate': 1.5000e-05, 'epoch': 4.92, 'throughput': 7526.38}

[INFO|2025-02-17 12:32:29] logging.py:157 >> {'loss': 0.1295, 'learning_rate': 1.4821e-05, 'epoch': 5.00, 'throughput': 7538.13}

[INFO|2025-02-17 12:32:48] logging.py:157 >> {'loss': 0.1019, 'learning_rate': 1.4640e-05, 'epoch': 5.10, 'throughput': 7553.80}

[INFO|2025-02-17 12:33:08] logging.py:157 >> {'loss': 0.0916, 'learning_rate': 1.4457e-05, 'epoch': 5.20, 'throughput': 7567.93}

[INFO|2025-02-17 12:33:27] logging.py:157 >> {'loss': 0.0935, 'learning_rate': 1.4273e-05, 'epoch': 5.29, 'throughput': 7583.13}

[INFO|2025-02-17 12:33:47] logging.py:157 >> {'loss': 0.0679, 'learning_rate': 1.4086e-05, 'epoch': 5.39, 'throughput': 7596.23}

[INFO|2025-02-17 12:34:06] logging.py:157 >> {'loss': 0.1010, 'learning_rate': 1.3898e-05, 'epoch': 5.49, 'throughput': 7607.87}

[INFO|2025-02-17 12:34:26] logging.py:157 >> {'loss': 0.0802, 'learning_rate': 1.3708e-05, 'epoch': 5.59, 'throughput': 7621.15}

[INFO|2025-02-17 12:34:46] logging.py:157 >> {'loss': 0.0812, 'learning_rate': 1.3516e-05, 'epoch': 5.68, 'throughput': 7631.54}

[INFO|2025-02-17 12:35:05] logging.py:157 >> {'loss': 0.0812, 'learning_rate': 1.3324e-05, 'epoch': 5.78, 'throughput': 7643.59}

[INFO|2025-02-17 12:35:05] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-300

[INFO|2025-02-17 12:35:05] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-300/config.json

[INFO|2025-02-17 12:35:05] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-300/generation_config.json

[INFO|2025-02-17 12:35:25] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-300/model.safetensors.index.json.

[INFO|2025-02-17 12:35:25] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-300/tokenizer_config.json

[INFO|2025-02-17 12:35:25] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-300/special_tokens_map.json

[INFO|2025-02-17 12:36:24] logging.py:157 >> {'loss': 0.0884, 'learning_rate': 1.3129e-05, 'epoch': 5.88, 'throughput': 7317.78}

[INFO|2025-02-17 12:36:44] logging.py:157 >> {'loss': 0.0906, 'learning_rate': 1.2934e-05, 'epoch': 5.98, 'throughput': 7332.20}

[INFO|2025-02-17 12:37:00] logging.py:157 >> {'loss': 0.0616, 'learning_rate': 1.2737e-05, 'epoch': 6.06, 'throughput': 7343.00}

[INFO|2025-02-17 12:37:19] logging.py:157 >> {'loss': 0.0447, 'learning_rate': 1.2539e-05, 'epoch': 6.16, 'throughput': 7357.50}

[INFO|2025-02-17 12:37:39] logging.py:157 >> {'loss': 0.0528, 'learning_rate': 1.2339e-05, 'epoch': 6.25, 'throughput': 7372.88}

[INFO|2025-02-17 12:37:58] logging.py:157 >> {'loss': 0.0429, 'learning_rate': 1.2139e-05, 'epoch': 6.35, 'throughput': 7387.40}

[INFO|2025-02-17 12:38:18] logging.py:157 >> {'loss': 0.0442, 'learning_rate': 1.1938e-05, 'epoch': 6.45, 'throughput': 7398.63}

[INFO|2025-02-17 12:38:37] logging.py:157 >> {'loss': 0.0470, 'learning_rate': 1.1736e-05, 'epoch': 6.55, 'throughput': 7411.53}

[INFO|2025-02-17 12:38:57] logging.py:157 >> {'loss': 0.0437, 'learning_rate': 1.1534e-05, 'epoch': 6.65, 'throughput': 7424.59}

[INFO|2025-02-17 12:39:17] logging.py:157 >> {'loss': 0.0466, 'learning_rate': 1.1331e-05, 'epoch': 6.74, 'throughput': 7436.34}

[INFO|2025-02-17 12:39:36] logging.py:157 >> {'loss': 0.0498, 'learning_rate': 1.1127e-05, 'epoch': 6.84, 'throughput': 7447.16}

[INFO|2025-02-17 12:39:56] logging.py:157 >> {'loss': 0.0409, 'learning_rate': 1.0923e-05, 'epoch': 6.94, 'throughput': 7459.25}

[INFO|2025-02-17 12:40:12] logging.py:157 >> {'loss': 0.0444, 'learning_rate': 1.0718e-05, 'epoch': 7.02, 'throughput': 7468.55}

[INFO|2025-02-17 12:40:31] logging.py:157 >> {'loss': 0.0212, 'learning_rate': 1.0513e-05, 'epoch': 7.12, 'throughput': 7479.66}

[INFO|2025-02-17 12:40:51] logging.py:157 >> {'loss': 0.0275, 'learning_rate': 1.0308e-05, 'epoch': 7.22, 'throughput': 7489.44}

[INFO|2025-02-17 12:41:10] logging.py:157 >> {'loss': 0.0234, 'learning_rate': 1.0103e-05, 'epoch': 7.31, 'throughput': 7500.49}

[INFO|2025-02-17 12:41:30] logging.py:157 >> {'loss': 0.0227, 'learning_rate': 9.8973e-06, 'epoch': 7.41, 'throughput': 7510.51}

[INFO|2025-02-17 12:41:50] logging.py:157 >> {'loss': 0.0264, 'learning_rate': 9.6920e-06, 'epoch': 7.51, 'throughput': 7520.71}

[INFO|2025-02-17 12:42:09] logging.py:157 >> {'loss': 0.0251, 'learning_rate': 9.4869e-06, 'epoch': 7.61, 'throughput': 7530.64}

[INFO|2025-02-17 12:42:29] logging.py:157 >> {'loss': 0.0237, 'learning_rate': 9.2820e-06, 'epoch': 7.70, 'throughput': 7541.10}

[INFO|2025-02-17 12:42:29] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-400

[INFO|2025-02-17 12:42:29] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-400/config.json

[INFO|2025-02-17 12:42:29] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-400/generation_config.json

[INFO|2025-02-17 12:42:49] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-400/model.safetensors.index.json.

[INFO|2025-02-17 12:42:49] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-400/tokenizer_config.json

[INFO|2025-02-17 12:42:49] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-400/special_tokens_map.json

[INFO|2025-02-17 12:43:46] logging.py:157 >> {'loss': 0.0228, 'learning_rate': 9.0773e-06, 'epoch': 7.80, 'throughput': 7303.95}

[INFO|2025-02-17 12:44:06] logging.py:157 >> {'loss': 0.0238, 'learning_rate': 8.8731e-06, 'epoch': 7.90, 'throughput': 7315.29}

[INFO|2025-02-17 12:44:26] logging.py:157 >> {'loss': 0.0222, 'learning_rate': 8.6693e-06, 'epoch': 8.00, 'throughput': 7326.04}

[INFO|2025-02-17 12:44:42] logging.py:157 >> {'loss': 0.0109, 'learning_rate': 8.4661e-06, 'epoch': 8.08, 'throughput': 7334.51}

[INFO|2025-02-17 12:45:01] logging.py:157 >> {'loss': 0.0118, 'learning_rate': 8.2635e-06, 'epoch': 8.18, 'throughput': 7345.75}

[INFO|2025-02-17 12:45:21] logging.py:157 >> {'loss': 0.0100, 'learning_rate': 8.0617e-06, 'epoch': 8.27, 'throughput': 7356.49}

[INFO|2025-02-17 12:45:40] logging.py:157 >> {'loss': 0.0112, 'learning_rate': 7.8607e-06, 'epoch': 8.37, 'throughput': 7367.37}

[INFO|2025-02-17 12:46:00] logging.py:157 >> {'loss': 0.0133, 'learning_rate': 7.6606e-06, 'epoch': 8.47, 'throughput': 7377.67}

[INFO|2025-02-17 12:46:19] logging.py:157 >> {'loss': 0.0113, 'learning_rate': 7.4614e-06, 'epoch': 8.57, 'throughput': 7388.41}

[INFO|2025-02-17 12:46:39] logging.py:157 >> {'loss': 0.0102, 'learning_rate': 7.2634e-06, 'epoch': 8.67, 'throughput': 7397.72}

[INFO|2025-02-17 12:46:58] logging.py:157 >> {'loss': 0.0100, 'learning_rate': 7.0665e-06, 'epoch': 8.76, 'throughput': 7408.35}

[INFO|2025-02-17 12:47:18] logging.py:157 >> {'loss': 0.0116, 'learning_rate': 6.8708e-06, 'epoch': 8.86, 'throughput': 7417.24}

[INFO|2025-02-17 12:47:38] logging.py:157 >> {'loss': 0.0111, 'learning_rate': 6.6765e-06, 'epoch': 8.96, 'throughput': 7425.37}

[INFO|2025-02-17 12:47:54] logging.py:157 >> {'loss': 0.0071, 'learning_rate': 6.4835e-06, 'epoch': 9.04, 'throughput': 7432.24}

[INFO|2025-02-17 12:48:13] logging.py:157 >> {'loss': 0.0054, 'learning_rate': 6.2920e-06, 'epoch': 9.14, 'throughput': 7440.97}

[INFO|2025-02-17 12:48:33] logging.py:157 >> {'loss': 0.0059, 'learning_rate': 6.1021e-06, 'epoch': 9.23, 'throughput': 7450.14}

[INFO|2025-02-17 12:48:53] logging.py:157 >> {'loss': 0.0045, 'learning_rate': 5.9139e-06, 'epoch': 9.33, 'throughput': 7458.50}

[INFO|2025-02-17 12:49:12] logging.py:157 >> {'loss': 0.0040, 'learning_rate': 5.7273e-06, 'epoch': 9.43, 'throughput': 7466.39}

[INFO|2025-02-17 12:49:32] logging.py:157 >> {'loss': 0.0042, 'learning_rate': 5.5426e-06, 'epoch': 9.53, 'throughput': 7474.49}

[INFO|2025-02-17 12:49:51] logging.py:157 >> {'loss': 0.0049, 'learning_rate': 5.3598e-06, 'epoch': 9.63, 'throughput': 7483.27}

[INFO|2025-02-17 12:49:51] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-500

[INFO|2025-02-17 12:49:51] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-500/config.json

[INFO|2025-02-17 12:49:51] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-500/generation_config.json

[INFO|2025-02-17 12:50:12] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-500/model.safetensors.index.json.

[INFO|2025-02-17 12:50:12] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-500/tokenizer_config.json

[INFO|2025-02-17 12:50:12] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-500/special_tokens_map.json

[INFO|2025-02-17 12:51:10] logging.py:157 >> {'loss': 0.0045, 'learning_rate': 5.1789e-06, 'epoch': 9.72, 'throughput': 7292.37}

[INFO|2025-02-17 12:51:30] logging.py:157 >> {'loss': 0.0040, 'learning_rate': 5.0000e-06, 'epoch': 9.82, 'throughput': 7300.78}

[INFO|2025-02-17 12:51:49] logging.py:157 >> {'loss': 0.0042, 'learning_rate': 4.8232e-06, 'epoch': 9.92, 'throughput': 7310.63}

[INFO|2025-02-17 12:52:05] logging.py:157 >> {'loss': 0.0037, 'learning_rate': 4.6487e-06, 'epoch': 10.00, 'throughput': 7317.82}

[INFO|2025-02-17 12:52:25] logging.py:157 >> {'loss': 0.0021, 'learning_rate': 4.4764e-06, 'epoch': 10.10, 'throughput': 7327.07}

[INFO|2025-02-17 12:52:44] logging.py:157 >> {'loss': 0.0020, 'learning_rate': 4.3064e-06, 'epoch': 10.20, 'throughput': 7335.94}

[INFO|2025-02-17 12:53:04] logging.py:157 >> {'loss': 0.0023, 'learning_rate': 4.1388e-06, 'epoch': 10.29, 'throughput': 7344.64}

[INFO|2025-02-17 12:53:24] logging.py:157 >> {'loss': 0.0020, 'learning_rate': 3.9737e-06, 'epoch': 10.39, 'throughput': 7352.70}

[INFO|2025-02-17 12:53:43] logging.py:157 >> {'loss': 0.0021, 'learning_rate': 3.8111e-06, 'epoch': 10.49, 'throughput': 7360.36}

[INFO|2025-02-17 12:54:03] logging.py:157 >> {'loss': 0.0025, 'learning_rate': 3.6511e-06, 'epoch': 10.59, 'throughput': 7369.18}

[INFO|2025-02-17 12:54:22] logging.py:157 >> {'loss': 0.0021, 'learning_rate': 3.4938e-06, 'epoch': 10.68, 'throughput': 7377.11}

[INFO|2025-02-17 12:54:42] logging.py:157 >> {'loss': 0.0017, 'learning_rate': 3.3393e-06, 'epoch': 10.78, 'throughput': 7384.90}

[INFO|2025-02-17 12:55:02] logging.py:157 >> {'loss': 0.0018, 'learning_rate': 3.1875e-06, 'epoch': 10.88, 'throughput': 7392.59}

[INFO|2025-02-17 12:55:21] logging.py:157 >> {'loss': 0.0018, 'learning_rate': 3.0387e-06, 'epoch': 10.98, 'throughput': 7400.64}

[INFO|2025-02-17 12:55:37] logging.py:157 >> {'loss': 0.0014, 'learning_rate': 2.8927e-06, 'epoch': 11.06, 'throughput': 7406.36}

[INFO|2025-02-17 12:55:57] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 2.7498e-06, 'epoch': 11.16, 'throughput': 7413.44}

[INFO|2025-02-17 12:56:16] logging.py:157 >> {'loss': 0.0014, 'learning_rate': 2.6099e-06, 'epoch': 11.25, 'throughput': 7420.91}

[INFO|2025-02-17 12:56:36] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 2.4731e-06, 'epoch': 11.35, 'throughput': 7427.77}

[INFO|2025-02-17 12:56:56] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 2.3396e-06, 'epoch': 11.45, 'throughput': 7434.56}

[INFO|2025-02-17 12:57:15] logging.py:157 >> {'loss': 0.0012, 'learning_rate': 2.2092e-06, 'epoch': 11.55, 'throughput': 7441.93}

[INFO|2025-02-17 12:57:15] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-600

[INFO|2025-02-17 12:57:15] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-600/config.json

[INFO|2025-02-17 12:57:15] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-600/generation_config.json

[INFO|2025-02-17 12:57:37] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-600/model.safetensors.index.json.

[INFO|2025-02-17 12:57:37] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-600/tokenizer_config.json

[INFO|2025-02-17 12:57:37] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-600/special_tokens_map.json

[INFO|2025-02-17 12:58:39] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 2.0821e-06, 'epoch': 11.65, 'throughput': 7271.40}

[INFO|2025-02-17 12:58:58] logging.py:157 >> {'loss': 0.0013, 'learning_rate': 1.9584e-06, 'epoch': 11.74, 'throughput': 7279.67}

[INFO|2025-02-17 12:59:17] logging.py:157 >> {'loss': 0.0014, 'learning_rate': 1.8380e-06, 'epoch': 11.84, 'throughput': 7287.84}

[INFO|2025-02-17 12:59:37] logging.py:157 >> {'loss': 0.0012, 'learning_rate': 1.7211e-06, 'epoch': 11.94, 'throughput': 7295.63}

[INFO|2025-02-17 12:59:53] logging.py:157 >> {'loss': 0.0014, 'learning_rate': 1.6077e-06, 'epoch': 12.02, 'throughput': 7301.92}

[INFO|2025-02-17 13:00:13] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 1.4978e-06, 'epoch': 12.12, 'throughput': 7308.89}

[INFO|2025-02-17 13:00:32] logging.py:157 >> {'loss': 0.0012, 'learning_rate': 1.3915e-06, 'epoch': 12.22, 'throughput': 7316.86}

[INFO|2025-02-17 13:00:52] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 1.2889e-06, 'epoch': 12.31, 'throughput': 7324.06}

[INFO|2025-02-17 13:01:11] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 1.1899e-06, 'epoch': 12.41, 'throughput': 7331.13}

[INFO|2025-02-17 13:01:31] logging.py:157 >> {'loss': 0.0012, 'learning_rate': 1.0946e-06, 'epoch': 12.51, 'throughput': 7338.53}

[INFO|2025-02-17 13:01:50] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 1.0031e-06, 'epoch': 12.61, 'throughput': 7345.34}

[INFO|2025-02-17 13:02:10] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 9.1535e-07, 'epoch': 12.70, 'throughput': 7352.25}

[INFO|2025-02-17 13:02:30] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 8.3145e-07, 'epoch': 12.80, 'throughput': 7359.27}

[INFO|2025-02-17 13:02:49] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 7.5141e-07, 'epoch': 12.90, 'throughput': 7365.77}

[INFO|2025-02-17 13:03:09] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 6.7528e-07, 'epoch': 13.00, 'throughput': 7372.62}

[INFO|2025-02-17 13:03:25] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 6.0307e-07, 'epoch': 13.08, 'throughput': 7377.30}

[INFO|2025-02-17 13:03:44] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 5.3483e-07, 'epoch': 13.18, 'throughput': 7383.80}

[INFO|2025-02-17 13:04:04] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 4.7058e-07, 'epoch': 13.27, 'throughput': 7390.60}

[INFO|2025-02-17 13:04:23] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 4.1035e-07, 'epoch': 13.37, 'throughput': 7396.91}

[INFO|2025-02-17 13:04:43] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 3.5415e-07, 'epoch': 13.47, 'throughput': 7402.92}

[INFO|2025-02-17 13:04:43] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-700

[INFO|2025-02-17 13:04:43] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-700/config.json

[INFO|2025-02-17 13:04:43] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-700/generation_config.json

[INFO|2025-02-17 13:05:09] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-700/model.safetensors.index.json.

[INFO|2025-02-17 13:05:09] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-700/tokenizer_config.json

[INFO|2025-02-17 13:05:09] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-700/special_tokens_map.json

[INFO|2025-02-17 13:06:06] logging.py:157 >> {'loss': 0.0009, 'learning_rate': 3.0203e-07, 'epoch': 13.57, 'throughput': 7259.08}

[INFO|2025-02-17 13:06:25] logging.py:157 >> {'loss': 0.0011, 'learning_rate': 2.5399e-07, 'epoch': 13.67, 'throughput': 7266.36}

[INFO|2025-02-17 13:06:45] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 2.1007e-07, 'epoch': 13.76, 'throughput': 7273.17}

[INFO|2025-02-17 13:07:05] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 1.7027e-07, 'epoch': 13.86, 'throughput': 7280.07}

[INFO|2025-02-17 13:07:24] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 1.3461e-07, 'epoch': 13.96, 'throughput': 7286.52}

[INFO|2025-02-17 13:07:40] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 1.0312e-07, 'epoch': 14.04, 'throughput': 7291.00}

[INFO|2025-02-17 13:08:00] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 7.5795e-08, 'epoch': 14.14, 'throughput': 7297.68}

[INFO|2025-02-17 13:08:19] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 5.2656e-08, 'epoch': 14.23, 'throughput': 7304.61}

[INFO|2025-02-17 13:08:39] logging.py:157 >> {'loss': 0.0009, 'learning_rate': 3.3710e-08, 'epoch': 14.33, 'throughput': 7311.13}

[INFO|2025-02-17 13:08:58] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 1.8967e-08, 'epoch': 14.43, 'throughput': 7317.57}

[INFO|2025-02-17 13:09:18] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 8.4311e-09, 'epoch': 14.53, 'throughput': 7322.85}

[INFO|2025-02-17 13:09:38] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 2.1080e-09, 'epoch': 14.63, 'throughput': 7328.91}

[INFO|2025-02-17 13:09:57] logging.py:157 >> {'loss': 0.0010, 'learning_rate': 0.0000e+00, 'epoch': 14.72, 'throughput': 7335.46}

[INFO|2025-02-17 13:09:57] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-765

[INFO|2025-02-17 13:09:57] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-765/config.json

[INFO|2025-02-17 13:09:57] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-765/generation_config.json

[INFO|2025-02-17 13:10:18] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-765/model.safetensors.index.json.

[INFO|2025-02-17 13:10:18] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-765/tokenizer_config.json

[INFO|2025-02-17 13:10:18] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/checkpoint-765/special_tokens_map.json

[INFO|2025-02-17 13:10:50] trainer.py:2643 >>

Training completed. Do not forget to share your model on huggingface.co/models =)



[INFO|2025-02-17 13:10:50] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10

[INFO|2025-02-17 13:10:50] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/config.json

[INFO|2025-02-17 13:10:50] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/generation_config.json

[INFO|2025-02-17 13:11:12] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 2 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/model.safetensors.index.json.

[INFO|2025-02-17 13:11:12] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/tokenizer_config.json

[INFO|2025-02-17 13:11:12] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10/special_tokens_map.json

[WARNING|2025-02-17 13:11:13] logging.py:162 >> No metric eval_loss to plot.

[WARNING|2025-02-17 13:11:13] logging.py:162 >> No metric eval_accuracy to plot.

[INFO|2025-02-17 13:11:13] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
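
The final weights, tokenizer files and generation config end up in saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10, sharded into two safetensors files plus an index. A minimal sketch for loading that directory for inference with transformers (the path is taken from the log; device placement and the prompt are illustrative assumptions):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt_dir = "saves/Qwen2.5-1.5B/full/train_2025-02-17-12-11-10"  # final save directory from the log

tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)
model = AutoModelForCausalLM.from_pretrained(ckpt_dir, torch_dtype=torch.bfloat16).cuda()

prompt = "..."  # your evaluation prompt here
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=2048)  # 2048 matches the saved generation_config
print(tokenizer.decode(output[0], skip_special_tokens=True))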