Upload 5 files
Upload the tokenizer of the original model [nvidia/Nemotron-Mini-4B-Instruct](https://huggingface.co/nvidia/Nemotron-Mini-4B-Instruct/tree/main), because loading the tokenizer from the GGUF file is too slow.

- .gitattributes +1 -0
- chat_template.jinja +15 -0
- special_tokens_map.json +1 -0
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +12 -0
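
The point of shipping these files is to let downstream tooling load the Hugging Face fast tokenizer straight from `tokenizer.json` instead of rebuilding it from GGUF metadata on every load. A minimal sketch of that usage, assuming a local clone of this repository (the path `"."` below is a placeholder for wherever you checked it out) and an installed `transformers`:

```python
# Minimal sketch: load the fast tokenizer shipped in this repo instead of
# rebuilding one from the GGUF metadata. "." is a placeholder for the path
# to a local clone of this repository.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")

# Encoding/decoding works as usual; the resulting ids can be fed to whatever
# runtime is serving the GGUF weights.
ids = tokenizer("Hello, Nemotron!").input_ids
print(ids)
print(tokenizer.decode(ids))
```

Note that `tokenizer.json` is tracked with Git LFS (see the `.gitattributes` change below), so `git lfs` must be installed for a plain `git clone` to fetch the real file rather than the pointer.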
.gitattributes
CHANGED
@@ -52,3 +52,4 @@ Nemotron-Mini-4B-Instruct-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
 Nemotron-Mini-4B-Instruct-IQ3_M.gguf filter=lfs diff=lfs merge=lfs -text
 Nemotron-Mini-4B-Instruct-f16.gguf filter=lfs diff=lfs merge=lfs -text
 Nemotron-Mini-4B-Instruct.imatrix filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
chat_template.jinja
ADDED
@@ -0,0 +1,15 @@
+{{'<extra_id_0>System'}}{% for message in messages %}{% if message['role'] == 'system' %}{{'
+' + message['content'].strip()}}{% if tools or contexts %}{{'
+'}}{% endif %}{% endif %}{% endfor %}{% if tools %}{% for tool in tools %}{{ '
+<tool> ' + tool|tojson + ' </tool>' }}{% endfor %}{% endif %}{% if contexts %}{% if tools %}{{'
+'}}{% endif %}{% for context in contexts %}{{ '
+<context> ' + context.strip() + ' </context>' }}{% endfor %}{% endif %}{{'
+
+'}}{% for message in messages %}{% if message['role'] == 'user' %}{{ '<extra_id_1>User
+' + message['content'].strip() + '
+' }}{% elif message['role'] == 'assistant' %}{{ '<extra_id_1>Assistant
+' + message['content'].strip() + '
+' }}{% elif message['role'] == 'tool' %}{{ '<extra_id_1>Tool
+' + message['content'].strip() + '
+' }}{% endif %}{% endfor %}{{'<extra_id_1>Assistant
+'}}
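
This is the standard Nemotron turn format. A small sketch of how the template renders, assuming a `transformers` release recent enough to pick up a standalone `chat_template.jinja`, with `"."` again standing in for a local clone of this repo:

```python
# Sketch: render the prompt format defined by chat_template.jinja above.
# Assumes transformers reads the standalone chat_template.jinja file;
# "." is a placeholder for a local clone of this repo.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi!"},
]
print(tok.apply_chat_template(messages, tokenize=False))
# Expected rendering:
# <extra_id_0>System
# You are a helpful assistant.
#
# <extra_id_1>User
# Hi!
# <extra_id_1>Assistant
```

The template already closes with the `<extra_id_1>Assistant` header, so no separate generation prompt needs to be appended.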
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1f39bcb3ba1b42457ec34b6cde2cb7ccffa82a077997c9aed8c5ee687779451
+size 34620230
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dfd8b970f437002fc445214304969fe59e64d4f48500bd0b77ba55340f2d811
+size 4545602
tokenizer_config.json
ADDED
@@ -0,0 +1,12 @@
+{
+  "added_tokens_decoder": {},
+  "bos_token_id": null,
+  "clean_up_tokenization_spaces": false,
+  "eos_token_id": null,
+  "extra_special_tokens": {},
+  "model_max_length": 1000000000000000019884624838656,
+  "model_type": "llama",
+  "pad_token_id": null,
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token_id": null
+}
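
This config leaves `bos_token_id`, `eos_token_id`, `pad_token_id`, and `unk_token_id` unset, and `special_tokens_map.json` is empty, so the tokenizer carries no EOS of its own; the GGUF metadata (or your runtime's stop settings) still decides when generation ends. A small check, again from a local clone and with the `<extra_id_1>` stop marker taken from the chat template above:

```python
# Sketch: inspect what the config above does (and does not) define.
# "." is a placeholder for a local clone of this repo.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
print(tok.model_max_length)  # ~1e30 sentinel meaning "no max length recorded"
print(tok.eos_token)         # None: no EOS is set in tokenizer_config.json

# The chat format separates turns with <extra_id_1>, so a common choice is to
# use it as a stop string when sampling; verify the id in your own setup.
print(tok.convert_tokens_to_ids("<extra_id_1>"))
```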