Fix example usage
README.md
CHANGED
@@ -17,8 +17,8 @@ The model was trained on the OLMo2 pretraining data. It has a context length of
 ```
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("UW/OLMo2-
-model = AutoModelForCausalLM.from_pretrained("UW/OLMo2-
+tokenizer = AutoTokenizer.from_pretrained("UW/OLMo2-11B-SuperBPE-t180k")
+model = AutoModelForCausalLM.from_pretrained("UW/OLMo2-11B-SuperBPE-t180k")
 
 tokenizer.convert_ids_to_tokens(tokenizer.encode("By the way, I am a fan of the Milky Way."))
 # ['ByĠtheĠway', ',ĠIĠam', 'Ġa', 'Ġfan', 'ĠofĠthe', 'ĠMilkyĠWay', '.']
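For context, a minimal sketch of using the corrected checkpoint end to end. The `generate` call and the `max_new_tokens` value are illustrative assumptions, not part of the README example:

```
from transformers import AutoTokenizer, AutoModelForCausalLM

# Checkpoint name taken from the fixed example above.
tokenizer = AutoTokenizer.from_pretrained("UW/OLMo2-11B-SuperBPE-t180k")
model = AutoModelForCausalLM.from_pretrained("UW/OLMo2-11B-SuperBPE-t180k")

inputs = tokenizer("By the way, I am a fan of the Milky Way.", return_tensors="pt")

# max_new_tokens=20 is an arbitrary choice for illustration.
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

The token list in the example output shows the superword behavior: tokens such as 'ByĠtheĠway' and 'ĠMilkyĠWay' span whitespace, so the sentence encodes to fewer tokens than under a standard BPE tokenizer.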