Training in progress, epoch 1
This view is limited to 50 files because it contains too many changes. See raw diff.
- model.safetensors +1 -1
- run-0/checkpoint-144/model.safetensors +1 -1
- run-0/checkpoint-144/optimizer.pt +1 -1
- run-0/checkpoint-144/rng_state.pth +1 -1
- run-0/checkpoint-144/scheduler.pt +1 -1
- run-0/checkpoint-144/trainer_state.json +23 -13
- run-0/checkpoint-144/training_args.bin +1 -1
- run-0/checkpoint-216/model.safetensors +1 -1
- run-0/checkpoint-216/optimizer.pt +1 -1
- run-0/checkpoint-216/rng_state.pth +1 -1
- run-0/checkpoint-216/scheduler.pt +1 -1
- run-0/checkpoint-216/trainer_state.json +24 -24
- run-0/checkpoint-216/training_args.bin +1 -1
- run-0/checkpoint-288/model.safetensors +1 -1
- run-0/checkpoint-288/optimizer.pt +1 -1
- run-0/checkpoint-288/rng_state.pth +1 -1
- run-0/checkpoint-288/scheduler.pt +1 -1
- run-0/checkpoint-288/trainer_state.json +40 -20
- run-0/checkpoint-288/training_args.bin +1 -1
- run-0/checkpoint-360/config.json +31 -0
- run-0/checkpoint-360/model.safetensors +3 -0
- run-0/checkpoint-360/optimizer.pt +3 -0
- run-0/checkpoint-360/rng_state.pth +3 -0
- run-0/checkpoint-360/scheduler.pt +3 -0
- run-0/checkpoint-360/special_tokens_map.json +7 -0
- run-0/checkpoint-360/tokenizer.json +0 -0
- run-0/checkpoint-360/tokenizer_config.json +55 -0
- run-0/checkpoint-360/trainer_state.json +76 -0
- run-0/checkpoint-360/training_args.bin +3 -0
- run-0/checkpoint-360/vocab.txt +0 -0
- run-1/checkpoint-144/model.safetensors +1 -1
- run-1/checkpoint-144/optimizer.pt +1 -1
- run-1/checkpoint-144/rng_state.pth +1 -1
- run-1/checkpoint-144/scheduler.pt +1 -1
- run-1/checkpoint-144/trainer_state.json +18 -18
- run-1/checkpoint-144/training_args.bin +1 -1
- run-1/checkpoint-216/model.safetensors +1 -1
- run-1/checkpoint-216/optimizer.pt +1 -1
- run-1/checkpoint-216/rng_state.pth +1 -1
- run-1/checkpoint-216/scheduler.pt +1 -1
- run-1/checkpoint-216/trainer_state.json +24 -24
- run-1/checkpoint-216/training_args.bin +1 -1
- run-1/checkpoint-288/config.json +31 -0
- run-1/checkpoint-288/model.safetensors +3 -0
- run-1/checkpoint-288/optimizer.pt +3 -0
- run-1/checkpoint-288/rng_state.pth +3 -0
- run-1/checkpoint-288/scheduler.pt +3 -0
- run-1/checkpoint-288/special_tokens_map.json +7 -0
- run-1/checkpoint-288/tokenizer.json +0 -0
- run-1/checkpoint-288/tokenizer_config.json +55 -0
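The run-0/ and run-1/ directories are per-trial checkpoint folders written during a Trainer hyperparameter search (the trainer_state.json diffs below all carry "is_hyper_param_search": true and a "trial_params" block). The launch script itself is not part of this commit; the following is only a minimal sketch, assuming GLUE STS-B loaded via the datasets library and the Optuna search backend, of the kind of call that produces this layout. compute_metrics is assumed here and sketched after the first trainer_state.json diff below.

    # Sketch only: the commit records checkpoints, not the training script.
    # Dataset and metric wiring are assumptions (GLUE STS-B via `datasets`).
    from datasets import load_dataset
    from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                              Trainer, TrainingArguments)

    tok = AutoTokenizer.from_pretrained("distilbert-base-uncased")
    raw = load_dataset("glue", "stsb")
    enc = raw.map(lambda b: tok(b["sentence1"], b["sentence2"], truncation=True),
                  batched=True)

    def model_init():
        # num_labels=1 gives the regression head ("problem_type": "regression").
        return AutoModelForSequenceClassification.from_pretrained(
            "distilbert-base-uncased", num_labels=1)

    args = TrainingArguments(
        output_dir="distilbert-base-uncased-finetuned-stsb",
        evaluation_strategy="epoch",
        save_strategy="epoch",
    )

    trainer = Trainer(model_init=model_init, args=args,
                      train_dataset=enc["train"], eval_dataset=enc["validation"],
                      tokenizer=tok,
                      compute_metrics=compute_metrics)  # assumed; see sketch further below

    # Each trial samples learning_rate, num_train_epochs, per-device batch size
    # and seed (the keys under "trial_params") and saves its checkpoints under
    # run-<trial_id>/checkpoint-<global_step>.
    best_trial = trainer.hyperparameter_search(direction="maximize",
                                               backend="optuna", n_trials=5)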
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:475be2c7e113d40683406e8823d7389e3567d12d4398a875869febd179abda22
 size 267829484

run-0/checkpoint-144/model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:24a5f7a8fe85b355597918b531bd148f2610dab6811eed29b4bdd0dd62e2ba26
 size 267829484

run-0/checkpoint-144/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0627630a5b8b7064b694b2d490e2239ba6f226c5b73473e1bd215b592cb87b1d
 size 535721146

run-0/checkpoint-144/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0f6b60aad452bff3447ffca2a36beeee0343938e34ba314a3008c1ede5f8c3f6
 size 14244

run-0/checkpoint-144/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3ed78427fd57c9e2deb308e475e266ba8a5ec7c042923d27ac3e7597ad3b96c7
 size 1064
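The binary files above are tracked with Git LFS, so the diff only shows the three-line pointer (spec version, sha256 oid, byte size); the old oids are truncated in this rendered view. A small sketch, not tied to any tool in this repository, of parsing such a pointer and checking a locally downloaded blob against it; the paths are illustrative.

    # Sketch: parse a Git LFS pointer file and verify a downloaded blob against it.
    import hashlib
    from pathlib import Path

    def read_pointer(path):
        # Pointer lines look like "oid sha256:<hex>" and "size <bytes>".
        fields = dict(line.split(" ", 1)
                      for line in Path(path).read_text().splitlines() if line.strip())
        return fields["oid"].removeprefix("sha256:").strip(), int(fields["size"])

    def verify(pointer_path, blob_path):
        oid, size = read_pointer(pointer_path)
        data = Path(blob_path).read_bytes()
        return len(data) == size and hashlib.sha256(data).hexdigest() == oid

    # verify("model.safetensors.pointer", "model.safetensors")  # True if the blob is intact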
run-0/checkpoint-144/trainer_state.json
CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.
+  "best_metric": 0.8184498144194529,
   "best_model_checkpoint": "distilbert-base-uncased-finetuned-stsb/run-0/checkpoint-144",
-  "epoch":
+  "epoch": 2.0,
   "eval_steps": 500,
   "global_step": 144,
   "is_hyper_param_search": true,
@@ -10,27 +10,37 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_loss":
-      "eval_pearson": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_spearmanr": 0.
-      "eval_steps_per_second":
+      "eval_loss": 1.1317814588546753,
+      "eval_pearson": 0.7925397117075783,
+      "eval_runtime": 0.9029,
+      "eval_samples_per_second": 1661.35,
+      "eval_spearmanr": 0.7981526098929177,
+      "eval_steps_per_second": 104.111,
+      "step": 72
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.8140504360198975,
+      "eval_pearson": 0.8184498144194529,
+      "eval_runtime": 1.1042,
+      "eval_samples_per_second": 1358.431,
+      "eval_spearmanr": 0.8199506621029472,
+      "eval_steps_per_second": 85.128,
       "step": 144
     }
   ],
   "logging_steps": 500,
-  "max_steps":
+  "max_steps": 360,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 500,
   "total_flos": 0,
-  "train_batch_size":
+  "train_batch_size": 8,
   "trial_name": null,
   "trial_params": {
-    "learning_rate":
+    "learning_rate": 7.503545930910804e-05,
     "num_train_epochs": 5,
-    "per_device_train_batch_size":
+    "per_device_train_batch_size": 8,
-    "seed":
+    "seed": 19
   }
 }
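The eval_pearson and eval_spearmanr values logged above are the usual STS-B correlations between predicted and gold similarity scores; the Trainer adds the eval_ prefix itself. A hedged sketch of a compute_metrics function that yields these keys, here written with scipy rather than any metric code recorded in this commit.

    # Sketch of a metrics function for STS-B style regression outputs.
    import numpy as np
    from scipy.stats import pearsonr, spearmanr

    def compute_metrics(eval_pred):
        preds, labels = eval_pred
        preds = np.squeeze(preds, axis=-1)   # (N, 1) regression logits -> (N,)
        return {
            "pearson": pearsonr(preds, labels)[0],
            "spearmanr": spearmanr(preds, labels)[0],
        }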
run-0/checkpoint-144/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:34840284466eb2205f9f10ed091682dc9319569f75d63faf8795d5f603abf328
 size 4920

run-0/checkpoint-216/model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:426c255c1ce8b20c685bae6f9ffad5d09a0a071cc92c4c571006322e326ed012
 size 267829484

run-0/checkpoint-216/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:827578c0047df3cab5efc5e50feadc32d168495e61866ef91c832f1ce5782b47
 size 535721146

run-0/checkpoint-216/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:dea1e3237b5de73b8d4f4afa8a389d9785cb7e1f3d1738843f3cd66c39f997fb
 size 14244

run-0/checkpoint-216/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b69465340872716a81e7ec58bbb9c8e5c56bb0b96c0110eacff6410fc47830bb
 size 1064
run-0/checkpoint-216/trainer_state.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.
+  "best_metric": 0.8350533952135925,
   "best_model_checkpoint": "distilbert-base-uncased-finetuned-stsb/run-0/checkpoint-216",
   "epoch": 3.0,
   "eval_steps": 500,
@@ -10,47 +10,47 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_loss": 1.
+      "eval_loss": 1.1317814588546753,
-      "eval_pearson": 0.
+      "eval_pearson": 0.7925397117075783,
-      "eval_runtime": 0.
+      "eval_runtime": 0.9029,
-      "eval_samples_per_second":
+      "eval_samples_per_second": 1661.35,
-      "eval_spearmanr": 0.
+      "eval_spearmanr": 0.7981526098929177,
-      "eval_steps_per_second":
+      "eval_steps_per_second": 104.111,
       "step": 72
     },
     {
       "epoch": 2.0,
-      "eval_loss": 0.
+      "eval_loss": 0.8140504360198975,
-      "eval_pearson": 0.
+      "eval_pearson": 0.8184498144194529,
-      "eval_runtime": 1.
+      "eval_runtime": 1.1042,
-      "eval_samples_per_second":
+      "eval_samples_per_second": 1358.431,
-      "eval_spearmanr": 0.
+      "eval_spearmanr": 0.8199506621029472,
-      "eval_steps_per_second":
+      "eval_steps_per_second": 85.128,
       "step": 144
     },
     {
       "epoch": 3.0,
-      "eval_loss": 0.
+      "eval_loss": 0.7895862460136414,
-      "eval_pearson": 0.
+      "eval_pearson": 0.8350533952135925,
-      "eval_runtime":
+      "eval_runtime": 1.2556,
-      "eval_samples_per_second":
+      "eval_samples_per_second": 1194.637,
-      "eval_spearmanr": 0.
+      "eval_spearmanr": 0.8311768906317339,
-      "eval_steps_per_second":
+      "eval_steps_per_second": 74.864,
       "step": 216
     }
   ],
   "logging_steps": 500,
-  "max_steps":
+  "max_steps": 360,
   "num_input_tokens_seen": 0,
-  "num_train_epochs":
+  "num_train_epochs": 5,
   "save_steps": 500,
   "total_flos": 0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": {
-    "learning_rate":
+    "learning_rate": 7.503545930910804e-05,
-    "num_train_epochs":
+    "num_train_epochs": 5,
     "per_device_train_batch_size": 8,
-    "seed":
+    "seed": 19
   }
 }
run-0/checkpoint-216/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:34840284466eb2205f9f10ed091682dc9319569f75d63faf8795d5f603abf328
 size 4920

run-0/checkpoint-288/model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:bd72cc626b9bd73f62e3b8bb9f804a912cd0093a5387220406ed93b8dbf25719
 size 267829484

run-0/checkpoint-288/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:92d34b6acb233e4eef89844a3cd9d9d458cb34b28d3d0af52769b21ba404bbf6
 size 535721146

run-0/checkpoint-288/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:83748ddeabe37c7bf34016f1f688ab7b86f864f4418190708ecf56161a5a5532
 size 14244

run-0/checkpoint-288/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e4e2fc1332c26a809e04b01288d2d96a106fb8f047fa634620469656307ec058
 size 1064
run-0/checkpoint-288/trainer_state.json
CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.
+  "best_metric": 0.8386197210710133,
   "best_model_checkpoint": "distilbert-base-uncased-finetuned-stsb/run-0/checkpoint-288",
-  "epoch":
+  "epoch": 4.0,
   "eval_steps": 500,
   "global_step": 288,
   "is_hyper_param_search": true,
@@ -10,37 +10,57 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_loss":
-      "eval_pearson": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_spearmanr": 0.
-      "eval_steps_per_second":
-      "step":
+      "eval_loss": 1.1317814588546753,
+      "eval_pearson": 0.7925397117075783,
+      "eval_runtime": 0.9029,
+      "eval_samples_per_second": 1661.35,
+      "eval_spearmanr": 0.7981526098929177,
+      "eval_steps_per_second": 104.111,
+      "step": 72
     },
     {
       "epoch": 2.0,
-      "eval_loss": 0.
-      "eval_pearson": 0.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_spearmanr": 0.
-      "eval_steps_per_second":
+      "eval_loss": 0.8140504360198975,
+      "eval_pearson": 0.8184498144194529,
+      "eval_runtime": 1.1042,
+      "eval_samples_per_second": 1358.431,
+      "eval_spearmanr": 0.8199506621029472,
+      "eval_steps_per_second": 85.128,
+      "step": 144
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 0.7895862460136414,
+      "eval_pearson": 0.8350533952135925,
+      "eval_runtime": 1.2556,
+      "eval_samples_per_second": 1194.637,
+      "eval_spearmanr": 0.8311768906317339,
+      "eval_steps_per_second": 74.864,
+      "step": 216
+    },
+    {
+      "epoch": 4.0,
+      "eval_loss": 0.6798363924026489,
+      "eval_pearson": 0.8386197210710133,
+      "eval_runtime": 0.9324,
+      "eval_samples_per_second": 1608.739,
+      "eval_spearmanr": 0.835886505973161,
+      "eval_steps_per_second": 100.814,
       "step": 288
     }
   ],
   "logging_steps": 500,
-  "max_steps":
+  "max_steps": 360,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 500,
   "total_flos": 0,
-  "train_batch_size":
+  "train_batch_size": 8,
   "trial_name": null,
   "trial_params": {
-    "learning_rate":
+    "learning_rate": 7.503545930910804e-05,
     "num_train_epochs": 5,
-    "per_device_train_batch_size":
+    "per_device_train_batch_size": 8,
-    "seed":
+    "seed": 19
   }
 }
run-0/checkpoint-288/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:34840284466eb2205f9f10ed091682dc9319569f75d63faf8795d5f603abf328
 size 4920

run-0/checkpoint-360/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "regression",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "vocab_size": 30522
+}
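The new config.json describes a single-output DistilBERT regression head ("problem_type": "regression", one label), so any of the checkpoint directories added here can be reloaded for inference once its files are present locally. A minimal sketch, with invented example sentences; only the checkpoint path comes from this commit.

    # Sketch: reload a trial checkpoint and score one sentence pair (STS-B scale, roughly 0-5).
    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    ckpt = "run-0/checkpoint-360"  # any checkpoint directory from this commit
    tok = AutoTokenizer.from_pretrained(ckpt)
    model = AutoModelForSequenceClassification.from_pretrained(ckpt)
    model.eval()

    inputs = tok("A man is playing a guitar.", "A person plays an instrument.",
                 return_tensors="pt")
    with torch.no_grad():
        score = model(**inputs).logits.squeeze().item()
    print(f"predicted similarity: {score:.2f}")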
run-0/checkpoint-360/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:377a6302f6781628151f9f27723e49c3f1fad19613b8f58c132fe21f1455d934
+size 267829484

run-0/checkpoint-360/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b40d19c18b0bd55aacc51bb47d9bb6f8b44dd1cc52bef52a84c4f31857ed059c
+size 535721146

run-0/checkpoint-360/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:868503269df12d980e3bc129f108241731a66e8cdb1cca8a0afd2c4aa9be1618
+size 14244

run-0/checkpoint-360/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9916d34da97289ad0564254ee346eb70a822eaba405e175fb372d13279f5f367
+size 1064
run-0/checkpoint-360/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-0/checkpoint-360/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
run-0/checkpoint-360/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
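The added tokenizer_config.json is the stock uncased WordPiece setup: lowercasing enabled and [PAD]/[UNK]/[CLS]/[SEP]/[MASK] mapped to ids 0/100/101/102/103. A quick sketch of what that layout looks like when a sentence pair is encoded from this checkpoint; the example text is invented and the token listing is only indicative.

    # Sketch: inspect how the saved tokenizer lays out an STS-B style sentence pair.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("run-0/checkpoint-360")
    enc = tok("A Cat Sat.", "A dog ran.")
    print(tok.convert_ids_to_tokens(enc["input_ids"]))
    # e.g. ['[CLS]', 'a', 'cat', 'sat', '.', '[SEP]', 'a', 'dog', 'ran', '.', '[SEP]']
    # do_lower_case=true shows up as lowercased wordpieces.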
run-0/checkpoint-360/trainer_state.json
ADDED
@@ -0,0 +1,76 @@
+{
+  "best_metric": 0.842503947673261,
+  "best_model_checkpoint": "distilbert-base-uncased-finetuned-stsb/run-0/checkpoint-360",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 360,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_loss": 1.1317814588546753,
+      "eval_pearson": 0.7925397117075783,
+      "eval_runtime": 0.9029,
+      "eval_samples_per_second": 1661.35,
+      "eval_spearmanr": 0.7981526098929177,
+      "eval_steps_per_second": 104.111,
+      "step": 72
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 0.8140504360198975,
+      "eval_pearson": 0.8184498144194529,
+      "eval_runtime": 1.1042,
+      "eval_samples_per_second": 1358.431,
+      "eval_spearmanr": 0.8199506621029472,
+      "eval_steps_per_second": 85.128,
+      "step": 144
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 0.7895862460136414,
+      "eval_pearson": 0.8350533952135925,
+      "eval_runtime": 1.2556,
+      "eval_samples_per_second": 1194.637,
+      "eval_spearmanr": 0.8311768906317339,
+      "eval_steps_per_second": 74.864,
+      "step": 216
+    },
+    {
+      "epoch": 4.0,
+      "eval_loss": 0.6798363924026489,
+      "eval_pearson": 0.8386197210710133,
+      "eval_runtime": 0.9324,
+      "eval_samples_per_second": 1608.739,
+      "eval_spearmanr": 0.835886505973161,
+      "eval_steps_per_second": 100.814,
+      "step": 288
+    },
+    {
+      "epoch": 5.0,
+      "eval_loss": 0.6598748564720154,
+      "eval_pearson": 0.842503947673261,
+      "eval_runtime": 0.8801,
+      "eval_samples_per_second": 1704.325,
+      "eval_spearmanr": 0.8388055898329965,
+      "eval_steps_per_second": 106.804,
+      "step": 360
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 360,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 7.503545930910804e-05,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 8,
+    "seed": 19
+  }
+}
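This finished-trial trainer_state.json carries the full per-epoch eval log for run-0: Pearson improves each epoch and peaks at 0.8425 at step 360. A small sketch for pulling that table back out of any checkpoint directory in this commit.

    # Sketch: summarize the per-epoch eval log stored in a trainer_state.json.
    import json
    from pathlib import Path

    state = json.loads(Path("run-0/checkpoint-360/trainer_state.json").read_text())
    evals = [e for e in state["log_history"] if "eval_pearson" in e]
    for e in evals:
        print(f'epoch {e["epoch"]:.0f}  step {e["step"]:>4}  '
              f'pearson {e["eval_pearson"]:.4f}  spearman {e["eval_spearmanr"]:.4f}')

    best = max(evals, key=lambda e: e["eval_pearson"])
    print("best step:", best["step"], " best_metric:", state["best_metric"])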
run-0/checkpoint-360/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34840284466eb2205f9f10ed091682dc9319569f75d63faf8795d5f603abf328
+size 4920

run-0/checkpoint-360/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
run-1/checkpoint-144/model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:07638e968314d4f617e04e8bcec63075090eb03937772d78c457cbdbc0fd0233
 size 267829484

run-1/checkpoint-144/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:575ac9ee9917d1142f451600d60b63b8abfdaf1465d1d6bb70dc81b153a9e497
 size 535721146

run-1/checkpoint-144/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:005af92a12eb8f22863bb8349d3f12df2dc4d57ee802f0b6a578f36089f9bd80
 size 14244

run-1/checkpoint-144/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:285714c93a29569e674c25b736b6240ba6fdf17837601450f031cdad2cd3175d
 size 1064
run-1/checkpoint-144/trainer_state.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.
+  "best_metric": 0.3042437087139711,
   "best_model_checkpoint": "distilbert-base-uncased-finetuned-stsb/run-1/checkpoint-144",
   "epoch": 2.0,
   "eval_steps": 500,
@@ -10,37 +10,37 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_loss":
+      "eval_loss": 4.080541133880615,
-      "eval_pearson": 0.
+      "eval_pearson": 0.19568172531555392,
-      "eval_runtime": 0.
+      "eval_runtime": 0.8653,
-      "eval_samples_per_second":
+      "eval_samples_per_second": 1733.563,
-      "eval_spearmanr": 0.
+      "eval_spearmanr": 0.19374973140130425,
-      "eval_steps_per_second":
+      "eval_steps_per_second": 108.637,
       "step": 72
     },
     {
       "epoch": 2.0,
-      "eval_loss":
+      "eval_loss": 2.333472728729248,
-      "eval_pearson": 0.
+      "eval_pearson": 0.3042437087139711,
-      "eval_runtime":
+      "eval_runtime": 0.8835,
-      "eval_samples_per_second":
+      "eval_samples_per_second": 1697.883,
-      "eval_spearmanr": 0.
+      "eval_spearmanr": 0.3379135804732249,
-      "eval_steps_per_second":
+      "eval_steps_per_second": 106.401,
       "step": 144
     }
   ],
   "logging_steps": 500,
-  "max_steps":
+  "max_steps": 288,
   "num_input_tokens_seen": 0,
-  "num_train_epochs":
+  "num_train_epochs": 4,
   "save_steps": 500,
   "total_flos": 0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": {
-    "learning_rate":
+    "learning_rate": 2.9518876446612178e-06,
-    "num_train_epochs":
+    "num_train_epochs": 4,
     "per_device_train_batch_size": 8,
-    "seed":
+    "seed": 1
   }
 }
run-1/checkpoint-144/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d9b02230c742da2d3d1d91021b9907baf4baff2dbddfb2a93f01fb7c904cc297
 size 4920

run-1/checkpoint-216/model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e9212b4591b20e14593940687b34c11425984a576f8a383677b1c795cc5a31fe
 size 267829484

run-1/checkpoint-216/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:15d5c55b91788e73c27a8e54b4ca2adcea4a7b08ba43305ef27d4704d6fe3f6d
 size 535721146

run-1/checkpoint-216/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:91d838dfaadb054514ed3333fbb35c61371e290ca7af5f594cb2edf539f8db82
 size 14244

run-1/checkpoint-216/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:45e7e74dd3e2e4a6fedaa2ace92b07f1c071e20a8d810bac800e1f24ac5cbe72
 size 1064
run-1/checkpoint-216/trainer_state.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.
+  "best_metric": 0.40073101077295625,
   "best_model_checkpoint": "distilbert-base-uncased-finetuned-stsb/run-1/checkpoint-216",
   "epoch": 3.0,
   "eval_steps": 500,
@@ -10,47 +10,47 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_loss":
+      "eval_loss": 4.080541133880615,
-      "eval_pearson": 0.
+      "eval_pearson": 0.19568172531555392,
-      "eval_runtime": 0.
+      "eval_runtime": 0.8653,
-      "eval_samples_per_second":
+      "eval_samples_per_second": 1733.563,
-      "eval_spearmanr": 0.
+      "eval_spearmanr": 0.19374973140130425,
-      "eval_steps_per_second":
+      "eval_steps_per_second": 108.637,
       "step": 72
     },
     {
       "epoch": 2.0,
-      "eval_loss":
+      "eval_loss": 2.333472728729248,
-      "eval_pearson": 0.
+      "eval_pearson": 0.3042437087139711,
-      "eval_runtime":
+      "eval_runtime": 0.8835,
-      "eval_samples_per_second":
+      "eval_samples_per_second": 1697.883,
-      "eval_spearmanr": 0.
+      "eval_spearmanr": 0.3379135804732249,
-      "eval_steps_per_second":
+      "eval_steps_per_second": 106.401,
       "step": 144
     },
     {
       "epoch": 3.0,
-      "eval_loss":
+      "eval_loss": 2.1389713287353516,
-      "eval_pearson": 0.
+      "eval_pearson": 0.40073101077295625,
-      "eval_runtime":
+      "eval_runtime": 0.8693,
-      "eval_samples_per_second":
+      "eval_samples_per_second": 1725.481,
-      "eval_spearmanr": 0.
+      "eval_spearmanr": 0.4525934449018444,
-      "eval_steps_per_second":
+      "eval_steps_per_second": 108.13,
       "step": 216
     }
   ],
   "logging_steps": 500,
-  "max_steps":
+  "max_steps": 288,
   "num_input_tokens_seen": 0,
-  "num_train_epochs":
+  "num_train_epochs": 4,
   "save_steps": 500,
   "total_flos": 0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": {
-    "learning_rate":
+    "learning_rate": 2.9518876446612178e-06,
-    "num_train_epochs":
+    "num_train_epochs": 4,
     "per_device_train_batch_size": 8,
-    "seed":
+    "seed": 1
   }
 }
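Comparing the two trials so far: run-1 uses a much smaller learning rate (about 3e-6, seed 1, 4 epochs) and reaches only Pearson 0.40 by epoch 3, while run-0 (about 7.5e-5, seed 19, 5 epochs) reaches 0.84. A sketch for ranking trials by the best_metric recorded in each run's latest trainer_state.json; the glob patterns are illustrative.

    # Sketch: rank hyperparameter-search trials by their recorded best_metric.
    import json
    from pathlib import Path

    rows = []
    for run_dir in sorted(Path(".").glob("run-*")):
        # take the newest checkpoint in each trial directory
        last = max(run_dir.glob("checkpoint-*"),
                   key=lambda p: int(p.name.split("-")[1]))
        state = json.loads((last / "trainer_state.json").read_text())
        rows.append((state["best_metric"], run_dir.name, state["trial_params"]))

    for metric, name, params in sorted(rows, reverse=True):
        print(f"{name}: best_metric={metric:.4f}  {params}")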
run-1/checkpoint-216/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d9b02230c742da2d3d1d91021b9907baf4baff2dbddfb2a93f01fb7c904cc297
 size 4920

run-1/checkpoint-288/config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "distilbert-base-uncased",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "pad_token_id": 0,
+  "problem_type": "regression",
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "vocab_size": 30522
+}
run-1/checkpoint-288/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23b567d646bfe9cd39c54fca479e6e56c7970bad7a34c3a881d532deff562ca3
+size 267829484

run-1/checkpoint-288/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87ba4d89a069be34ba3b373ba06f061afc698dc5aa5101dca8f3659504b4754c
+size 535721146

run-1/checkpoint-288/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5588ffa63a15c785e79c68e9ea894c1c2c115cf05b0ca9f0064d62ed34f23c6
+size 14244

run-1/checkpoint-288/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7fcb7d8cc6e558b75f9bd94eca34ef2bc1a99e21838dd3708d69f9f5b8b55e1
+size 1064
run-1/checkpoint-288/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-1/checkpoint-288/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
run-1/checkpoint-288/tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}