Commit a53d4ad
Parent(s): e8f97ea
name_or_path removed

Files changed:
- config.json (+0, -1)
- tokenizer_config.json (+1, -1)
config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "KoichiYasuoka/bert-large-japanese-char-extended",
   "architectures": [
     "BertForTokenClassification"
   ],
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "do_lower_case": false, "do_word_tokenize": true, "do_subword_tokenize": true, "word_tokenizer_type": "basic", "subword_tokenizer_type": "character", "never_split": ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"], "mecab_kwargs": null, "special_tokens_map_file": null, "tokenizer_file": null
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "do_lower_case": false, "do_word_tokenize": true, "do_subword_tokenize": true, "word_tokenizer_type": "basic", "subword_tokenizer_type": "character", "never_split": ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"], "mecab_kwargs": null, "special_tokens_map_file": null, "tokenizer_file": null}
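
For reference, a minimal loading sketch (not part of this commit; the path "./" is a placeholder for a local checkout of this repository): transformers fills in "_name_or_path" from whatever path or repo id is passed to from_pretrained, so the key removed from config.json above does not need to be stored in the file.

# Minimal sketch, assuming a local checkout of this repository in the
# current working directory ("./" is a placeholder path).
from transformers import AutoConfig, AutoTokenizer

config = AutoConfig.from_pretrained("./")
print(config.architectures)    # ["BertForTokenClassification"]
print(config._name_or_path)    # set at load time, no longer read from config.json

# tokenizer_config.json above supplies the special tokens and the
# character-level subword settings; loading it does not depend on _name_or_path.
tokenizer = AutoTokenizer.from_pretrained("./")
print(tokenizer.tokenize("酸素ボンベを充填する"))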