Update README.md
README.md
CHANGED
@@ -7,6 +7,7 @@ tags:
 - wizardlm
 - uncensored
 - gptq
+- quantization
 - auto-gptq
 - 7b
 - llama
@@ -15,6 +16,8 @@ tags:
 
 # Get Started
 This model should use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) so you need to use `auto-gptq`
+- `no-act-order` model
+- 4bit model quantization
 
 ```py
 from transformers import AutoTokenizer, pipeline, AutoModelForCausalLM, LlamaForCausalLM, LlamaTokenizer, StoppingCriteria, PreTrainedTokenizerBase
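
For context, here is a minimal sketch of the `auto-gptq` loading path the README points at. The repository id, safetensors setting, and prompt below are placeholders/assumptions, not taken from this diff:

```py
# Minimal sketch (assumptions): loading a 4-bit, no-act-order GPTQ model with auto-gptq.
# "your-username/wizardlm-7b-uncensored-gptq" is a placeholder repo id, not the actual model path.
from auto_gptq import AutoGPTQForCausalLM
from transformers import AutoTokenizer

model_id = "your-username/wizardlm-7b-uncensored-gptq"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)

# from_quantized reads the quantization config saved alongside the weights
model = AutoGPTQForCausalLM.from_quantized(
    model_id,
    device="cuda:0",
    use_safetensors=True,  # assumption: weights stored as safetensors
)

prompt = "Explain GPTQ quantization in one sentence."
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cuda:0")
output = model.generate(input_ids=input_ids, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```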