Upload tokenizer
- tokenizer.json (+4 -2)
- tokenizer_config.json (+8 -1)
tokenizer.json
CHANGED
@@ -2,12 +2,14 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length":
+    "max_length": 256,
     "strategy": "LongestFirst",
     "stride": 0
   },
   "padding": {
-    "strategy":
+    "strategy": {
+      "Fixed": 256
+    },
     "direction": "Right",
     "pad_to_multiple_of": null,
     "pad_id": 0,
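For context, these are the fields the tokenizers library writes when fixed-length padding and truncation are enabled on a tokenizer. A minimal sketch of how this serialization is typically produced (the file path is a placeholder, not part of this commit):

from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("tokenizer.json")  # placeholder path

# Serialized as "truncation": {"max_length": 256, "strategy": "LongestFirst", "stride": 0, ...}
tokenizer.enable_truncation(max_length=256, strategy="longest_first", stride=0)

# Passing length=256 selects fixed-length padding, serialized as "strategy": {"Fixed": 256};
# omitting `length` would serialize the strategy as "BatchLongest" instead.
tokenizer.enable_padding(direction="right", pad_id=0, length=256)

tokenizer.save("tokenizer.json")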
tokenizer_config.json
CHANGED
@@ -59,7 +59,7 @@
     }
   },
   "bos_token": "<s>",
-  "clean_up_tokenization_spaces":
+  "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
   "eos_token": "</s>",
   "errors": "replace",
@@ -67,10 +67,17 @@
   "full_tokenizer_file": null,
   "mask_token": "[MASK]",
   "max_len": 512,
+  "max_length": 512,
   "model_max_length": 512,
+  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "sep_token": "[SEP]",
+  "stride": 0,
   "tokenizer_class": "RobertaTokenizer",
   "trim_offsets": true,
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "[UNK]"
 }
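The keys added here are init kwargs that transformers reads back when the tokenizer is loaded. A minimal sketch of loading and using them (the repo id is a placeholder for the repository this commit belongs to):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/model")  # placeholder repo id

# model_max_length, padding_side and truncation_side come from tokenizer_config.json
print(tok.model_max_length, tok.padding_side, tok.truncation_side)  # 512 right right

# Fixed-length encoding, consistent with the max_length=256 baked into tokenizer.json
batch = tok(["a short example"], padding="max_length", truncation=True, max_length=256)
print(len(batch["input_ids"][0]))  # 256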