biomed-multi-omic
Biology · RNA

Commit 0ffd3b7 (verified) · 1 Parent(s): e3e3110 · committed by thrumbel

Commit of multi-tokenizer for bmfm-omics

tokenizers/expressions/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
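
Note: special_tokens_map.json declares which strings play the cls/mask/pad/sep/unk roles when the tokenizer is loaded through transformers. A minimal sketch of inspecting it directly, assuming a local checkout of this repo so the relative path resolves:

import json

# Read the special-token roles for the expressions tokenizer.
with open("tokenizers/expressions/special_tokens_map.json") as f:
    special = json.load(f)

# Each entry maps a role (cls_token, mask_token, ...) to its surface form.
for role, tok in special.items():
    print(role, "->", tok["content"])  # e.g. cls_token -> [CLS]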
tokenizers/expressions/tokenizer.json ADDED
@@ -0,0 +1,201 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "[UNK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "[SEP]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "[PAD]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "[CLS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 4,
+       "content": "[MASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "BertNormalizer",
+     "clean_text": true,
+     "handle_chinese_chars": false,
+     "strip_accents": null,
+     "lowercase": false
+   },
+   "pre_tokenizer": null,
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "[CLS]": {
+         "id": "[CLS]",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "[CLS]"
+         ]
+       },
+       "[SEP]": {
+         "id": "[SEP]",
+         "ids": [
+           1
+         ],
+         "tokens": [
+           "[SEP]"
+         ]
+       }
+     }
+   },
+   "decoder": {
+     "type": "WordPiece",
+     "prefix": "##",
+     "cleanup": true
+   },
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "[UNK]": 0,
+       "[SEP]": 1,
+       "[PAD]": 2,
+       "[CLS]": 3,
+       "[MASK]": 4,
+       "0": 5,
+       "1": 6,
+       "2": 7,
+       "3": 8,
+       "4": 9,
+       "5": 10,
+       "6": 11,
+       "7": 12,
+       "8": 13,
+       "9": 14,
+       "10": 15,
+       "11": 16,
+       "12": 17,
+       "13": 18,
+       "14": 19,
+       "15": 20,
+       "16": 21,
+       "17": 22,
+       "18": 23,
+       "19": 24,
+       "20": 25,
+       "21": 26,
+       "22": 27,
+       "23": 28,
+       "24": 29,
+       "25": 30,
+       "26": 31,
+       "27": 32,
+       "28": 33,
+       "29": 34,
+       "30": 35,
+       "31": 36,
+       "32": 37,
+       "33": 38,
+       "34": 39,
+       "35": 40,
+       "36": 41,
+       "37": 42,
+       "38": 43,
+       "39": 44,
+       "40": 45,
+       "41": 46,
+       "42": 47,
+       "43": 48,
+       "44": 49,
+       "45": 50,
+       "46": 51,
+       "47": 52,
+       "48": 53,
+       "49": 54
+     },
+     "unk_token": "[UNK]"
+   }
+ }
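
This tokenizer.json defines a WordLevel model over binned expression values ("0" through "49") with a BERT-style [CLS] ... [SEP] template and no pre-tokenizer. A minimal usage sketch, assuming the tokenizers package is installed and the path above resolves in a local checkout; because pre_tokenizer is null, inputs must be passed pre-split:

from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizers/expressions/tokenizer.json")

# pre_tokenizer is null, so hand over the bin labels already split;
# the WordLevel model looks each one up in the 55-entry vocab.
enc = tok.encode(["0", "17", "49"], is_pretokenized=True)

print(enc.tokens)  # ['[CLS]', '0', '17', '49', '[SEP]'] via TemplateProcessing
print(enc.ids)     # [3, 5, 22, 54, 1]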
tokenizers/expressions/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_text": false,
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": false,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
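
The config registers the tokenizer as a BertTokenizer; the large model_max_length value is transformers' VERY_LARGE_INTEGER sentinel, meaning no practical length limit is recorded here. A sketch of loading the directory as a whole, assuming a local checkout (AutoTokenizer should pick up the fast tokenizer built from tokenizer.json):

from transformers import AutoTokenizer

# Loading the directory combines tokenizer_config.json, tokenizer.json,
# vocab.json, and special_tokens_map.json.
tok = AutoTokenizer.from_pretrained("tokenizers/expressions")
print(tok.convert_tokens_to_ids(["[CLS]", "0", "49", "[MASK]"]))  # [3, 5, 54, 4]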
tokenizers/expressions/vocab.json ADDED
@@ -0,0 +1 @@
+ {"[UNK]":0,"[SEP]":1,"[PAD]":2,"[CLS]":3,"[MASK]":4,"0":5,"1":6,"2":7,"3":8,"4":9,"5":10,"6":11,"7":12,"8":13,"9":14,"10":15,"11":16,"12":17,"13":18,"14":19,"15":20,"16":21,"17":22,"18":23,"19":24,"20":25,"21":26,"22":27,"23":28,"24":29,"25":30,"26":31,"27":32,"28":33,"29":34,"30":35,"31":36,"32":37,"33":38,"34":39,"35":40,"36":41,"37":42,"38":43,"39":44,"40":45,"41":46,"42":47,"43":48,"44":49,"45":50,"46":51,"47":52,"48":53,"49":54}
tokenizers/genes/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizers/genes/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizers/genes/tokenizer_config.json ADDED
@@ -0,0 +1,75 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "94909": {
+       "content": "[S]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "94910": {
+       "content": "[T]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_text": false,
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": false,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
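
Unlike the other three tokenizers, the genes config registers two extra special tokens, [S] (id 94909) and [T] (id 94910), placed just past a gene-symbol vocabulary too large to render in this diff. A sketch to confirm their ids after loading; the total of 94911 is an inference from these ids, not stated anywhere in the commit:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("tokenizers/genes")

# [S] and [T] sit on top of the usual five specials and the gene vocab.
print(tok.convert_tokens_to_ids(["[S]", "[T]"]))  # [94909, 94910]
print(len(tok))  # expected 94911 if [T] holds the last id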
tokenizers/genes/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizers/label_expressions/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizers/label_expressions/tokenizer.json ADDED
@@ -0,0 +1,201 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "[UNK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "[SEP]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "[PAD]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "[CLS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 4,
+       "content": "[MASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "BertNormalizer",
+     "clean_text": true,
+     "handle_chinese_chars": false,
+     "strip_accents": null,
+     "lowercase": false
+   },
+   "pre_tokenizer": null,
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "[CLS]": {
+         "id": "[CLS]",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "[CLS]"
+         ]
+       },
+       "[SEP]": {
+         "id": "[SEP]",
+         "ids": [
+           1
+         ],
+         "tokens": [
+           "[SEP]"
+         ]
+       }
+     }
+   },
+   "decoder": {
+     "type": "WordPiece",
+     "prefix": "##",
+     "cleanup": true
+   },
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "[UNK]": 0,
+       "[SEP]": 1,
+       "[PAD]": 2,
+       "[CLS]": 3,
+       "[MASK]": 4,
+       "0": 5,
+       "1": 6,
+       "2": 7,
+       "3": 8,
+       "4": 9,
+       "5": 10,
+       "6": 11,
+       "7": 12,
+       "8": 13,
+       "9": 14,
+       "10": 15,
+       "11": 16,
+       "12": 17,
+       "13": 18,
+       "14": 19,
+       "15": 20,
+       "16": 21,
+       "17": 22,
+       "18": 23,
+       "19": 24,
+       "20": 25,
+       "21": 26,
+       "22": 27,
+       "23": 28,
+       "24": 29,
+       "25": 30,
+       "26": 31,
+       "27": 32,
+       "28": 33,
+       "29": 34,
+       "30": 35,
+       "31": 36,
+       "32": 37,
+       "33": 38,
+       "34": 39,
+       "35": 40,
+       "36": 41,
+       "37": 42,
+       "38": 43,
+       "39": 44,
+       "40": 45,
+       "41": 46,
+       "42": 47,
+       "43": 48,
+       "44": 49,
+       "45": 50,
+       "46": 51,
+       "47": 52,
+       "48": 53,
+       "49": 54
+     },
+     "unk_token": "[UNK]"
+   }
+ }
tokenizers/label_expressions/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_text": false,
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": false,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
tokenizers/label_expressions/vocab.json ADDED
@@ -0,0 +1 @@
+ {"[UNK]":0,"[SEP]":1,"[PAD]":2,"[CLS]":3,"[MASK]":4,"0":5,"1":6,"2":7,"3":8,"4":9,"5":10,"6":11,"7":12,"8":13,"9":14,"10":15,"11":16,"12":17,"13":18,"14":19,"15":20,"16":21,"17":22,"18":23,"19":24,"20":25,"21":26,"22":27,"23":28,"24":29,"25":30,"26":31,"27":32,"28":33,"29":34,"30":35,"31":36,"32":37,"33":38,"34":39,"35":40,"36":41,"37":42,"38":43,"39":44,"40":45,"41":46,"42":47,"43":48,"44":49,"45":50,"46":51,"47":52,"48":53,"49":54}
tokenizers/perturbations/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizers/perturbations/tokenizer.json ADDED
@@ -0,0 +1,153 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "[UNK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "[SEP]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "[PAD]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "[CLS]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 4,
+       "content": "[MASK]",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "BertNormalizer",
+     "clean_text": true,
+     "handle_chinese_chars": false,
+     "strip_accents": null,
+     "lowercase": false
+   },
+   "pre_tokenizer": null,
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "[CLS]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "[SEP]",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "[CLS]": {
+         "id": "[CLS]",
+         "ids": [
+           3
+         ],
+         "tokens": [
+           "[CLS]"
+         ]
+       },
+       "[SEP]": {
+         "id": "[SEP]",
+         "ids": [
+           1
+         ],
+         "tokens": [
+           "[SEP]"
+         ]
+       }
+     }
+   },
+   "decoder": {
+     "type": "WordPiece",
+     "prefix": "##",
+     "cleanup": true
+   },
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "[UNK]": 0,
+       "[SEP]": 1,
+       "[PAD]": 2,
+       "[CLS]": 3,
+       "[MASK]": 4,
+       "0": 5,
+       "1": 6
+     },
+     "unk_token": "[UNK]"
+   }
+ }
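
The perturbations tokenizer reuses the same pipeline, but beyond the five specials its vocabulary holds only "0" and "1", presumably a per-position perturbed/unperturbed flag (an assumption; the commit does not document it). A minimal sketch:

from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizers/perturbations/tokenizer.json")

# Encode a binary flag per position, pre-split as before.
enc = tok.encode(["0", "1", "1", "0"], is_pretokenized=True)
print(enc.ids)  # [3, 5, 6, 6, 5, 1] -- [CLS] 0 1 1 0 [SEP]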
tokenizers/perturbations/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_text": false,
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": false,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
tokenizers/perturbations/vocab.json ADDED
@@ -0,0 +1 @@
+ {"[UNK]":0,"[SEP]":1,"[PAD]":2,"[CLS]":3,"[MASK]":4,"0":5,"1":6}