Upload tokenizer
Files changed:
- tokenizer_config.json +0 -1
- vocab.json +0 -0
tokenizer_config.json
CHANGED
@@ -26,7 +26,6 @@
   "extra_special_tokens": {},
   "model_max_length": 77,
   "pad_token": "<|endoftext|>",
-  "processor_class": "CLIPProcessor",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": "<|endoftext|>"
 }
vocab.json
CHANGED
The diff for this file is too large to render.
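Together, tokenizer_config.json and vocab.json specify the CLIP tokenizer uploaded in this commit. A minimal sketch of loading it with transformers and checking the values visible in the config diff above; the repo id "your-org/your-model" is a placeholder, not the actual repository:

from transformers import CLIPTokenizer

# Placeholder repo id; substitute the repository this commit was pushed to.
tokenizer = CLIPTokenizer.from_pretrained("your-org/your-model")

# Values set in the tokenizer_config.json shown above.
print(tokenizer.model_max_length)  # 77
print(tokenizer.pad_token)         # <|endoftext|>
print(tokenizer.unk_token)         # <|endoftext|>

On the removed line: "processor_class" in tokenizer_config.json is one of the hints AutoProcessor can consult when resolving a processor class. Dropping it does not affect loading the tokenizer directly via CLIPTokenizer, as sketched here.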