add tokenizer
- added_tokens.json +1 -0
- merges.txt +0 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"                               ": 50257, "                              ": 50258, "                             ": 50259, "                            ": 50260, "                           ": 50261, "                          ": 50262, "                         ": 50263, "                        ": 50264, "                       ": 50265, "                      ": 50266, "                     ": 50267, "                    ": 50268, "                   ": 50269, "                  ": 50270, "                 ": 50271, "                ": 50272, "               ": 50273, "              ": 50274, "             ": 50275, "            ": 50276, "           ": 50277, "          ": 50278, "         ": 50279, "        ": 50280, "       ": 50281, "      ": 50282, "     ": 50283, "    ": 50284, "   ": 50285, "  ": 50286, "\t\t\t\t\t\t\t\t\t": 50287, "\t\t\t\t\t\t\t\t": 50288, "\t\t\t\t\t\t\t": 50289, "\t\t\t\t\t\t": 50290, "\t\t\t\t\t": 50291, "\t\t\t\t": 50292, "\t\t\t": 50293, "\t\t": 50294, "50256": 50295}
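
These entries give single-token encodings to long runs of spaces (ids 50257-50286) and to runs of two to nine tabs (ids 50287-50294), the whitespace-compression trick the CodeGen family uses for source code. A minimal sketch of the effect, assuming the five files in this commit sit together in a local directory (the path below is hypothetical):

from transformers import GPT2Tokenizer

# Hypothetical local directory holding vocab.json, merges.txt,
# added_tokens.json, special_tokens_map.json, and tokenizer_config.json.
tok = GPT2Tokenizer.from_pretrained("./PT_SFCodeGen_2B")

# The 8-space indent should come out as a single added-token id
# (one of 50257-50286) rather than several byte-pair pieces.
ids = tok.encode("        return x")
print(ids)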
merges.txt
ADDED
The diff for this file is too large to render.
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>", "pad_token": 50256}
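
Note that pad_token is given here as the bare value 50256 rather than as a token string; 50256 is the id of <|endoftext|> in the GPT-2 vocabulary, so padding is evidently meant to reuse the end-of-text token. If that value was at some point passed to the tokenizer as the string "50256", it would also explain the odd "50256": 50295 entry in added_tokens.json above, since Hugging Face registers an unrecognized pad string as a brand-new token.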
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "PT_SFCodeGen_2B", "trust_remote_code": true, "tokenizer_class": "GPT2Tokenizer"}
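
Since tokenizer_class is GPT2Tokenizer, AutoTokenizer resolves these files to the standard slow GPT-2 tokenizer; the stored trust_remote_code flag is harmless here, as that class ships with transformers. A quick sanity check, again with a hypothetical local path:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./PT_SFCodeGen_2B")
print(tok.model_max_length)                          # 1024, from tokenizer_config.json
print(tok.bos_token, tok.eos_token, tok.unk_token)   # all <|endoftext|>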
vocab.json
ADDED
The diff for this file is too large to render.