p1atdev · Upload 6 files · 3c2add4
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_special_tokens": true,
  "additional_special_tokens": [
    "0",
    "1",
    "2",
    "3",
    "4",
    "5",
    "6",
    "7",
    "8",
    "9",
    "▁▁",
    "▁▁▁▁",
    "▁▁▁▁▁▁▁▁",
    "▁▁▁▁▁▁▁▁▁▁▁▁",
    "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
    " ",
    " ",
    "⁂",
    "─",
    "***",
    "----",
    "▁\"",
    "▁Author",
    "▁Title",
    "▁Tags",
    "▁Genre",
    "▁Style",
    "▁Knowledge",
    "▁Summary",
    "▁Rating",
    "Type",
    "Characters",
    "Glossary",
    "<|spmspace|>",
    "<|spmspace|><|spmspace|>",
    "<|spmspace|><|spmspace|><|spmspace|><|spmspace|>",
    "<|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|>",
    "<|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|>",
    "<|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|><|spmspace|>",
    "<|mtvocab|>",
    "<|mtvenglish|>",
    "<|mtvjapanese|>",
    "<|mtsentence|>",
    "<|mtsjapanese|>",
    "<|mtsenglish|>",
    "<|mtsentenceend|>",
    "<|mtvocabend|>",
    "<|mtend|>",
    "<|mask|>",
    "<|masksingle|>",
    "<|maskshort|>",
    "<|maskmedium|>",
    "<|masklong|>",
    "<|maskparagraph|>",
    "<|maskend|>",
    "<|fill|>",
    "<|fillend|>",
    "<|rubycover|>",
    "<|rubystart|>",
    "<|rubyend|>",
    "<|reserved0|>",
    "<|reserved1|>",
    "<|reserved2|>",
    "<|reserved3|>",
    "<|reserved4|>",
    "<|reserved5|>",
    "<|reserved6|>",
    "<|reserved7|>",
    "<|reserved8|>",
    "<|reserved9|>",
    "<|reserved10|>"
  ],
  "bos_token": {
    "__type": "AddedToken",
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "clean_up_tokenization_spaces": false,
  "eos_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "legacy": true,
  "model_max_length": 8192,
  "pad_token": {
    "__type": "AddedToken",
    "content": "<|pad|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "truncation": false,
  "unk_token": {
    "__type": "AddedToken",
    "content": "<|unknown|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
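
A minimal sketch of how this configuration is consumed via the standard transformers loading path, assuming the file sits next to the SentencePiece model file in a local directory; the "./tokenizer" path below is a placeholder, not taken from this file.

# Loading sketch (assumption: this tokenizer_config.json lives alongside
# tokenizer.model in a local directory; "./tokenizer" is a placeholder path).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./tokenizer")

# add_bos_token=true and add_eos_token=false mean encoding prepends
# <|startoftext|> but does not append <|endoftext|>.
ids = tokenizer("Hello").input_ids
print(tokenizer.convert_ids_to_tokens(ids))  # starts with '<|startoftext|>'

# Every entry in additional_special_tokens (the digits 0-9, the indentation
# runs, and the <|mt...|>, <|mask...|>, <|fill...|>, <|ruby...|>, and
# <|reserved...|> markers) is kept as a single atomic token rather than
# being split by the underlying SentencePiece model.
ids = tokenizer("<|mask|>1984<|maskend|>").input_ids
print(tokenizer.convert_ids_to_tokens(ids))

Because the digits 0 through 9 are registered as special tokens, numbers tokenize one digit per token, and model_max_length caps sequences at 8192 tokens when truncation is requested at call time.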