Update README.md
README.md CHANGED
@@ -32,7 +32,7 @@ This is a quantized version of `Llama 3.1 70B Instruct`. Quantization to **4-bit
 - **License:** llama3.1
 - **Base Model [optional]:** meta-llama/Meta-Llama-3.1-8B-Instruct

-```
+```python
 # Use a pipeline as a high-level helper
 from transformers import pipeline

@@ -43,7 +43,7 @@ pipe = pipeline("text-generation", model="meta-llama/Meta-Llama-3.1-8B-Instruct"
 pipe(messages)
 ```

-```
+```python
 # Load model directly
 from transformers import AutoTokenizer, AutoModelForCausalLM

 tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
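For context, the pipeline snippet above ends with `pipe(messages)`, but the diff elides the lines where `messages` is defined. A minimal end-to-end sketch, assuming the standard chat-style message list from the Transformers documentation:

```python
# Sketch of the full pipeline usage. The `messages` list is an
# assumption: the diff hides the lines that define it, so the usual
# chat-format example from the Transformers docs stands in here.
from transformers import pipeline

pipe = pipeline("text-generation", model="meta-llama/Meta-Llama-3.1-8B-Instruct")

messages = [
    {"role": "user", "content": "Who are you?"},
]

print(pipe(messages))
```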
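The hunk context also says this card describes a 4-bit quantized model, while the snippets load the base checkpoint as-is. A sketch of one common way to get 4-bit weights at load time, assuming a bitsandbytes NF4 setup (the card does not state which quantization method was actually used):

```python
# Hedged sketch: 4-bit loading via bitsandbytes. If the repo already
# ships pre-quantized weights, a plain from_pretrained on the repo id
# is enough and this config is unnecessary.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"  # base model named in the card

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # quantize linear layers to 4-bit on load
    bnb_4bit_quant_type="nf4",              # NormalFloat4, the common default
    bnb_4bit_compute_dtype=torch.bfloat16,  # dtype used for matmul compute
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)
```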