add tokenizer
- added_tokens.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"<s>": 69, "</s>": 70}
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
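
For reference, a minimal sketch of loading the tokenizer these files describe, assuming all four sit together in the working directory (the "./" recorded as name_or_path above):

from transformers import Wav2Vec2CTCTokenizer

# Reads vocab.json, added_tokens.json, special_tokens_map.json and
# tokenizer_config.json from the current directory.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# The word delimiter "|" stands in for spaces in the CTC vocabulary
# and is turned back into a space when decoding.
ids = tokenizer("مرحبا بالعالم").input_ids
print(tokenizer.decode(ids))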
vocab.json
ADDED
@@ -0,0 +1 @@
+{"ة": 0, "ّ": 1, "ت": 2, "َ": 3, "ح": 4, "ﺃ": 5, "»": 6, "ف": 7, "ط": 8, "ڨ": 9, "د": 10, "أ": 11, "ى": 12, "ؤ": 13, "؛": 14, "ۚ": 15, "ھ": 16, "ْ": 17, "إ": 18, "ء": 19, "ً": 20, "ـ": 21, "_": 22, "ث": 23, "ل": 24, "خ": 25, "ِ": 26, "t": 27, "ض": 28, "ق": 29, "م": 30, "ج": 31, "؟": 32, "ی": 33, "ن": 34, "ش": 35, "«": 36, "ٍ": 37, "ۖ": 38, "e": 39, "g": 40, "—": 41, "ُ": 42, "چ": 43, "ا": 44, "ز": 45, "ٰ": 46, "ظ": 47, "ي": 48, "ه": 49, "ئ": 50, "ذ": 51, "ك": 52, "ب": 53, "ٌ": 54, "،": 55, "ر": 56, "ص": 57, "آ": 58, "س": 59, "ﻻ": 60, "ک": 61, "ع": 62, "غ": 63, "و": 64, "☭": 65, "|": 66, "[UNK]": 67, "[PAD]": 68}
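
The base vocabulary covers 69 symbols: Arabic letters and diacritics, a few stray Latin characters and punctuation, the "|" word delimiter at 66, [UNK] at 67 and [PAD] at 68. added_tokens.json then extends it with <s> (69) and </s> (70). A quick sanity check of that layout (a hypothetical snippet, not part of the commit):

import json

# Merge the base vocabulary with the two added sentence tokens.
with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)
with open("added_tokens.json", encoding="utf-8") as f:
    vocab.update(json.load(f))

print(len(vocab))                                  # 71 ids in total
print(vocab["|"], vocab["[PAD]"], vocab["</s>"])   # 66 68 70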