xkaska02 committed
Commit bfafb61 · verified · 1 parent: df9eebb

End of training

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 library_name: transformers
-license: cc-by-nc-sa-4.0
-base_model: ufal/robeczech-base
+license: mit
+base_model: FacebookAI/xlm-roberta-base
 tags:
 - generated_from_trainer
 metrics:
@@ -19,13 +19,13 @@ should probably proofread and complete it, then remove this comment. -->
 
 # robeczech_lr3e-05_bs16_train287
 
-This model is a fine-tuned version of [ufal/robeczech-base](https://huggingface.co/ufal/robeczech-base) on an unknown dataset.
+This model is a fine-tuned version of [FacebookAI/xlm-roberta-base](https://huggingface.co/FacebookAI/xlm-roberta-base) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.1742
-- Precision: 0.9465
-- Recall: 0.9588
-- F1: 0.9526
-- Accuracy: 0.9719
+- Loss: 0.1179
+- Precision: 0.9454
+- Recall: 0.9595
+- F1: 0.9524
+- Accuracy: 0.9714
 
 ## Model description
 
@@ -56,24 +56,17 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
-| No log        | 1.0   | 18   | 1.2586          | 0.3462    | 0.0217 | 0.0409 | 0.5760   |
-| No log        | 2.0   | 36   | 0.8008          | 0.6946    | 0.6972 | 0.6959 | 0.8562   |
-| No log        | 3.0   | 54   | 0.5170          | 0.7920    | 0.7904 | 0.7912 | 0.9000   |
-| No log        | 4.0   | 72   | 0.3632          | 0.8889    | 0.8885 | 0.8887 | 0.9433   |
-| No log        | 5.0   | 90   | 0.3043          | 0.8967    | 0.8967 | 0.8967 | 0.9475   |
-| No log        | 6.0   | 108  | 0.2746          | 0.8952    | 0.8996 | 0.8974 | 0.9485   |
-| No log        | 7.0   | 126  | 0.2490          | 0.9023    | 0.9092 | 0.9057 | 0.9523   |
-| No log        | 8.0   | 144  | 0.2249          | 0.9252    | 0.9314 | 0.9283 | 0.9619   |
-| No log        | 9.0   | 162  | 0.2103          | 0.9333    | 0.9387 | 0.9360 | 0.9657   |
-| No log        | 10.0  | 180  | 0.2057          | 0.9408    | 0.9358 | 0.9383 | 0.9661   |
-| No log        | 11.0  | 198  | 0.1937          | 0.9439    | 0.9421 | 0.9430 | 0.9684   |
-| No log        | 12.0  | 216  | 0.1963          | 0.9342    | 0.9387 | 0.9364 | 0.9659   |
-| No log        | 13.0  | 234  | 0.1851          | 0.9458    | 0.9430 | 0.9444 | 0.9688   |
-| No log        | 14.0  | 252  | 0.1734          | 0.9447    | 0.9493 | 0.9470 | 0.9707   |
-| No log        | 15.0  | 270  | 0.1686          | 0.9457    | 0.9503 | 0.9480 | 0.9713   |
-| No log        | 16.0  | 288  | 0.1745          | 0.9446    | 0.9459 | 0.9452 | 0.9701   |
-| No log        | 17.0  | 306  | 0.1730          | 0.9429    | 0.9416 | 0.9423 | 0.9686   |
-| No log        | 18.0  | 324  | 0.1707          | 0.9464    | 0.9464 | 0.9464 | 0.9705   |
+| No log        | 1.0   | 18   | 1.1550          | 1.0       | 0.0005 | 0.0010 | 0.5668   |
+| No log        | 2.0   | 36   | 0.4725          | 0.7099    | 0.7006 | 0.7052 | 0.8587   |
+| No log        | 3.0   | 54   | 0.2293          | 0.8740    | 0.8643 | 0.8691 | 0.9351   |
+| No log        | 4.0   | 72   | 0.1474          | 0.9224    | 0.9126 | 0.9175 | 0.9565   |
+| No log        | 5.0   | 90   | 0.1210          | 0.9457    | 0.9411 | 0.9434 | 0.9697   |
+| No log        | 6.0   | 108  | 0.1212          | 0.9409    | 0.9382 | 0.9396 | 0.9674   |
+| No log        | 7.0   | 126  | 0.1067          | 0.9540    | 0.9517 | 0.9529 | 0.9740   |
+| No log        | 8.0   | 144  | 0.0918          | 0.9574    | 0.9551 | 0.9562 | 0.9753   |
+| No log        | 9.0   | 162  | 0.1076          | 0.9549    | 0.9517 | 0.9533 | 0.9749   |
+| No log        | 10.0  | 180  | 0.0990          | 0.9599    | 0.9585 | 0.9592 | 0.9774   |
+| No log        | 11.0  | 198  | 0.1027          | 0.9673    | 0.9570 | 0.9621 | 0.9778   |
 
 
 ### Framework versions
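
The card now documents an XLM-RoBERTa token-classification checkpoint. As a minimal usage sketch — the Hub id `xkaska02/robeczech_lr3e-05_bs16_train287` is an assumption inferred from the committer name and card title, not something the diff confirms:

```python
# Minimal inference sketch. The repo id below is an inferred assumption;
# substitute the actual Hub id if it differs.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="xkaska02/robeczech_lr3e-05_bs16_train287",
    aggregation_strategy="simple",  # merge subword pieces into word-level spans
)
print(ner("Karel Čapek se narodil v Malých Svatoňovicích."))
```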
config.json CHANGED
@@ -1,12 +1,11 @@
 {
   "architectures": [
-    "RobertaForTokenClassification"
+    "XLMRobertaForTokenClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "bos_token_id": 0,
   "classifier_dropout": null,
   "eos_token_id": 2,
-  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
@@ -44,14 +43,15 @@
   },
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
-  "model_type": "roberta",
+  "model_type": "xlm-roberta",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
+  "output_past": true,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
   "transformers_version": "4.51.3",
   "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 51997
+  "vocab_size": 250002
 }
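
The architecture swap is easy to sanity-check after download; a small sketch under the same repo-id assumption as above:

```python
# Verify the downloaded config matches this commit (repo id is assumed, as above).
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("xkaska02/robeczech_lr3e-05_bs16_train287")
assert cfg.model_type == "xlm-roberta"  # was "roberta" before this commit
assert cfg.vocab_size == 250002         # XLM-R vocabulary, up from 51997
print(cfg.architectures)                # ['XLMRobertaForTokenClassification']
```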
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b47ecb94ac5f8b577d3abf55b982950e3e2270b3411866e47f71713d9ed65ed2
-size 501604780
+oid sha256:cd5ddaf56443ed926d9db9b29d05ebdda35b1bf1357bd511e563cadb5fee26de
+size 1109876260
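
The checkpoint roughly doubles in size, and the vocabulary change alone accounts for nearly all of it. A back-of-the-envelope check, using float32 weights and the hidden size of 768 from config.json:

```python
# Embedding-table growth explains the size jump:
# 501,604,780 -> 1,109,876,260 bytes is a difference of ~608 MB.
old_embed = 51_997 * 768 * 4    # robeczech-base embeddings,   ~160 MB
new_embed = 250_002 * 768 * 4   # xlm-roberta-base embeddings, ~768 MB
print(f"embedding growth ~ {(new_embed - old_embed) / 1e6:.0f} MB")  # ~608 MB
```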
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json CHANGED
@@ -1,51 +1,15 @@
 {
-  "bos_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
   "mask_token": {
-    "content": "[MASK]",
+    "content": "<mask>",
     "lstrip": true,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "[UNK]",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
 }
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -2,57 +2,55 @@
   "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
-      "content": "[CLS]",
+      "content": "<s>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
     "1": {
-      "content": "[PAD]",
+      "content": "<pad>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
     "2": {
-      "content": "[SEP]",
+      "content": "</s>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
     "3": {
-      "content": "[UNK]",
+      "content": "<unk>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "51960": {
-      "content": "[MASK]",
+    "250001": {
+      "content": "<mask>",
       "lstrip": true,
-      "normalized": true,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
-  "bos_token": "[CLS]",
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
-  "cls_token": "[CLS]",
-  "eos_token": "[SEP]",
-  "errors": "replace",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
   "extra_special_tokens": {},
-  "mask_token": "[MASK]",
+  "mask_token": "<mask>",
   "model_max_length": 512,
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "tokenizer_class": "RobertaTokenizer",
-  "trim_offsets": true,
-  "unk_token": "[UNK]"
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
 }
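
A quick way to confirm the tokenizer side of the migration, again under the same repo-id assumption:

```python
# Check the saved tokenizer is XLM-R's SentencePiece tokenizer (repo id assumed).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("xkaska02/robeczech_lr3e-05_bs16_train287")
print(type(tok).__name__)                # an XLMRobertaTokenizer variant
print(tok.mask_token, tok.mask_token_id) # <mask> 250001, matching the diff above
```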
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0fa54fc9fb5a9a5a7f829d6cacbc46e2e73172d1de3d627169e160018036e54a
+oid sha256:72dbaf1afa3d24118d9b081fe26e204945f7f39bdf265210b20a287337532e5b
 size 5368