bkaplan committed on
Commit ba2d8b6 · verified · 1 Parent(s): c7304a0

Update app.py

Files changed (1)
  1. app.py +4 -5
app.py CHANGED
@@ -1,11 +1,10 @@
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import LlamaTokenizer, LlamaForCausalLM
  import torch

- # Load the model
- model_name = "bkaplan/MRL1"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
+ # Use a generic LLaMA tokenizer
+ tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+ model = LlamaForCausalLM.from_pretrained("bkaplan/MRL1", device_map="auto", torch_dtype=torch.float16)

  def respond(message, history, system_message, max_tokens, temperature, top_p):
      try:
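
The diff is truncated at the try: block, so the body of respond is not part of this commit view. Purely as illustration, here is a minimal sketch of how the tokenizer and model loaded above might be used to generate a reply, assuming a Gradio-style history of (user, assistant) pairs; the prompt format, sampling call, and error handling below are hypothetical and not taken from this commit.

# Hypothetical sketch, not part of this commit: one way respond() could use
# the tokenizer and model loaded above.
def respond(message, history, system_message, max_tokens, temperature, top_p):
    try:
        # Build a simple prompt from the system message, prior turns, and the new message.
        prompt = system_message + "\n"
        for user_msg, bot_msg in history:
            prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
        prompt += f"User: {message}\nAssistant:"

        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        output_ids = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
        )
        # Decode only the newly generated tokens, not the echoed prompt.
        new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True)
    except Exception as e:
        return f"Error: {e}"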