Force use of the slow tokenizer to avoid tokenizer.json issues
README.md CHANGED
@@ -40,16 +40,14 @@ Run model in Python:
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-tokenizer
+# Force use of the slow tokenizer to avoid tokenizer.json issues
+tokenizer = AutoTokenizer.from_pretrained("SVECTOR-CORPORATION/Theta-35-Mini", use_fast=False)
 model = AutoModelForCausalLM.from_pretrained("SVECTOR-CORPORATION/Theta-35-Mini")
 
-# Prompt input
 inputs = tokenizer("Once upon a time", return_tensors="pt")
 
-# Generate output
 outputs = model.generate(**inputs, max_length=100, temperature=0.7)
 
-# Decode and print
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 ```
 
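For context (not part of the commit): the fast tokenizer is Rust-backed and is loaded from `tokenizer.json`, while `use_fast=False` selects the pure-Python tokenizer built from the legacy vocabulary files, so a missing or malformed `tokenizer.json` no longer blocks loading. Below is a minimal sketch of an alternative fallback pattern that only drops to the slow tokenizer when the fast one fails to load; the `model_id` variable and the broad `except` are illustrative assumptions, not something this repository ships.

```python
from transformers import AutoTokenizer

model_id = "SVECTOR-CORPORATION/Theta-35-Mini"

try:
    # Fast (Rust-backed) tokenizer; AutoTokenizer prefers it by default.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
except Exception as err:  # broad catch for illustration only
    print(f"Fast tokenizer failed to load ({err}); falling back to the slow one")
    # Slow (pure-Python) tokenizer, rebuilt from the legacy vocab files.
    tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)

# tokenizer.is_fast reports which implementation was actually loaded.
print(type(tokenizer).__name__, "is_fast =", tokenizer.is_fast)
```

One unrelated note on the README snippet itself: `temperature=0.7` only affects generation when sampling is enabled (`do_sample=True`); with the default greedy decoding it is ignored, and recent transformers versions emit a warning about it.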