Update README.md
README.md CHANGED
```diff
@@ -15,7 +15,7 @@ We are releasing an intermediate checkpoint of SmolLM2 to enable further researc
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 checkpoint = "HuggingFaceTB/SmolLM2-135M-intermediate-checkpoints"
-revision = "step-
+revision = "step-240000" # replace by the revision you want
 device = torch.device("cuda" if torch.cuda.is_available() else "mps" if hasattr(torch, 'mps') and torch.mps.is_available() else "cpu")
 tokenizer = AutoTokenizer.from_pretrained(checkpoint, revision=revision)
 model = AutoModelForCausalLM.from_pretrained(checkpoint, revision=revision).to(device)
@@ -26,7 +26,7 @@ print(tokenizer.decode(outputs[0]))
 
 ## Training Details
 For comprehensive information about SmolLM2 training methodology, please refer to:
-- Our [model page](https://huggingface.co/
+- Our [model page](https://huggingface.co/HuggingFaceTB/SmolLM2-135M)
 - Our [GitHub repository](https://github.com/huggingface/smollm)
 - Our [paper](https://huggingface.co/papers/2502.02737)
 
```
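The updated snippet pins `revision = "step-240000"` and invites readers to swap in another revision. As a minimal sketch (not part of the README), the revisions available for this repo can be listed with `list_repo_refs` from `huggingface_hub`, assuming the intermediate checkpoints are published as Hub branches named `step-<N>` with numeric suffixes, as the example revision suggests:

```python
# Sketch: enumerate the step-* revisions of the intermediate-checkpoints repo.
# Assumes checkpoints are stored as branches named "step-<N>" (hypothetical layout,
# inferred from the revision = "step-240000" example in the README).
from huggingface_hub import list_repo_refs

checkpoint = "HuggingFaceTB/SmolLM2-135M-intermediate-checkpoints"

refs = list_repo_refs(checkpoint)
step_revisions = sorted(
    (branch.name for branch in refs.branches if branch.name.startswith("step-")),
    key=lambda name: int(name.split("-", 1)[1]),  # sort by training step number
)
print(step_revisions)
```

Any name printed here can then be passed as `revision=` to `AutoTokenizer.from_pretrained` and `AutoModelForCausalLM.from_pretrained`, exactly as in the README snippet above.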