Update README.md
README.md
```diff
@@ -41,16 +41,7 @@ from peft import PeftModel
 
 MODEL_ID = "LoftQ/Meta-Llama-3-8B-4bit-64rank"
 
-base_model = AutoModelForCausalLM.from_pretrained(
-    MODEL_ID,
-    torch_dtype=torch.bfloat16,  # you may change it with different models
-    quantization_config=BitsAndBytesConfig(
-        load_in_4bit=True,
-        bnb_4bit_compute_dtype=torch.bfloat16,  # bfloat16 is recommended
-        bnb_4bit_use_double_quant=False,
-        bnb_4bit_quant_type='nf4',
-    ),
-)
+base_model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
 peft_model = PeftModel.from_pretrained(
     base_model,
     MODEL_ID,
```
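For context, here is a minimal sketch of the full loading snippet as it reads after this change, presumably relying on the `quantization_config` stored in the checkpoint's own `config.json` rather than an explicit `BitsAndBytesConfig`. The trailing `subfolder="loftq_init"` and `is_trainable=True` arguments are assumptions based on typical LoftQ hub repos; they are not shown in the diff above.

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

MODEL_ID = "LoftQ/Meta-Llama-3-8B-4bit-64rank"

# Load the 4-bit base model. The explicit BitsAndBytesConfig from the old
# snippet is dropped here, assuming the quantization settings ship inside
# the checkpoint's config.json.
base_model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

# Attach the LoftQ-initialized LoRA adapters on top of the quantized base
# model. subfolder and is_trainable are assumptions (common in LoftQ repos),
# not part of the diff above.
peft_model = PeftModel.from_pretrained(
    base_model,
    MODEL_ID,
    subfolder="loftq_init",
    is_trainable=True,
)
```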