abdfajar707 committed
Commit fe73aaa
1 Parent(s): 6d1666c

Update app.py

Files changed (1)
  1. app.py +20 -14
app.py CHANGED
@@ -1,12 +1,21 @@
+from unsloth import FastLanguageModel
 import gradio as gr
-from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+# Declare necessary variables
+max_seq_length = 2048  # Choose any! We auto support RoPE Scaling internally!
+dtype = None  # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True  # Use 4bit quantization to reduce memory usage. Can be False.
 
+# Load the model and tokenizer
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name="abdfajar707/llama3_8B_lora_model_rkp_pn2025_v3",  # YOUR MODEL YOU USED FOR TRAINING
+    max_seq_length=max_seq_length,
+    dtype=dtype,
+    load_in_4bit=load_in_4bit,
+)
+FastLanguageModel.for_inference(model)  # Enable native 2x faster inference
 
+# Define the respond function
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -27,23 +36,20 @@ def respond(
 
     response = ""
 
-    for message in client.chat_completion(
+    for msg in model.chat_completion(
         messages,
         max_tokens=max_tokens,
         stream=True,
         temperature=temperature,
         top_p=top_p,
     ):
-        token = message.choices[0].delta.content
-
+        token = msg.choices[0].delta.content
         response += token
         yield response
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+# Create the Gradio interface
 demo = gr.ChatInterface(
-    respond,
+    fn=respond,
     additional_inputs=[
         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
@@ -58,6 +64,6 @@ demo = gr.ChatInterface(
     ],
 )
 
-
+# Launch the Gradio interface
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
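
Note: `chat_completion` is a method of `huggingface_hub.InferenceClient`, which this commit removes; the model object returned by `FastLanguageModel.from_pretrained` has no such method, so the updated loop will raise an AttributeError at runtime. A minimal sketch of a local streaming loop that could stand in for it, using transformers' `TextIteratorStreamer` (the `stream_reply` helper and its arguments are illustrative, not part of the commit; `model` and `tokenizer` are the objects loaded above, and `messages` is the OpenAI-style list of role/content dicts that `respond` builds):

# A sketch, assuming `model` and `tokenizer` from FastLanguageModel.from_pretrained
from threading import Thread

from transformers import TextIteratorStreamer


def stream_reply(messages, max_tokens, temperature, top_p):
    # Render the chat history into the model's prompt format.
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)

    # TextIteratorStreamer yields decoded text pieces as generation proceeds.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )

    # Run generation on a background thread so we can consume the stream here.
    Thread(
        target=model.generate,
        kwargs=dict(
            input_ids=inputs,
            streamer=streamer,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,  # required for temperature/top_p to take effect
        ),
    ).start()

    response = ""
    for token in streamer:  # iterate the streamer instead of model.chat_completion(...)
        response += token
        yield response

`respond` could delegate its generation step to a helper like this and keep the rest of the Gradio wiring unchanged.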