|
{
  "added_tokens_decoder": {
    "35": {
      "content": "[UNK]",
      "lstrip": true,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "36": {
      "content": "[PAD]",
      "lstrip": true,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "37": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "38": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "do_lower_case": false,
  "eos_token": "</s>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "[PAD]",
  "replace_word_delimiter_char": " ",
  "target_lang": null,
  "tokenizer_class": "Wav2Vec2CTCTokenizer",
  "tokenizer_file": null,
  "unk_token": "[UNK]",
  "vocab": {
    "[PAD]": 36,
    "[UNK]": 35,
    "a": 1,
    "b": 2,
    "c": 3,
    "d": 4,
    "e": 5,
    "f": 6,
    "g": 7,
    "h": 8,
    "i": 9,
    "j": 10,
    "k": 11,
    "l": 12,
    "m": 13,
    "n": 14,
    "o": 15,
    "p": 16,
    "r": 17,
    "s": 18,
    "t": 19,
    "u": 20,
    "v": 21,
    "x": 22,
    "z": 23,
    "|": 0,
    "ā": 24,
    "č": 25,
    "ē": 26,
    "ģ": 27,
    "ī": 28,
    "ķ": 29,
    "ļ": 30,
    "ņ": 31,
    "š": 32,
    "ū": 33,
    "ž": 34
  },
  "word_delimiter_token": "|"
}