mimireyburn committed on
Commit
80150e9
·
1 Parent(s): c1bb08d

Test w/o 8bit

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -7,7 +7,7 @@ checkpoint = 1200
7
 
8
  # Load your fine-tuned model and tokenizer
9
  tokenizer = t.AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
10
- model = t.AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-7b-hf", load_in_8bit=True, torch_dtype=torch.float16)
11
  tokenizer.pad_token_id = 0
12
 
13
  config = peft.LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"], lora_dropout=0.005, bias="none", task_type="CAUSAL_LM")
 
7
 
8
  # Load your fine-tuned model and tokenizer
9
  tokenizer = t.AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
10
+ model = t.AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-7b-hf")
11
  tokenizer.pad_token_id = 0
12
 
13
  config = peft.LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"], lora_dropout=0.005, bias="none", task_type="CAUSAL_LM")