panga12345 committed on
Commit
ba78b2f
·
verified ·
1 Parent(s): c224e9a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -23
app.py CHANGED
@@ -1,11 +1,11 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
 
 
 
9
 
10
  def respond(
11
  message,
@@ -24,25 +24,23 @@ def respond(
24
  messages.append({"role": "assistant", "content": val[1]})
25
 
26
  messages.append({"role": "user", "content": message})
27
-
 
28
  response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
  response += token
40
  yield response
41
 
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
  demo = gr.ChatInterface(
47
  respond,
48
  additional_inputs=[
@@ -59,6 +57,5 @@ demo = gr.ChatInterface(
59
  ],
60
  )
61
 
62
-
63
  if __name__ == "__main__":
64
- demo.launch()
 
1
  import gradio as gr
2
+ from llama_cpp import Llama
 
 
 
 
 
3
 
4
+ # Load the model
5
+ llm = Llama.from_pretrained(
6
+ repo_id="mradermacher/Fimbulvetr-11B-v2-GGUF",
7
+ filename="Fimbulvetr-11B-v2.IQ3_M.gguf",
8
+ )
9
 
10
  def respond(
11
  message,
 
24
  messages.append({"role": "assistant", "content": val[1]})
25
 
26
  messages.append({"role": "user", "content": message})
27
+
28
+ # Generate response using llama_cpp
29
  response = ""
30
+ stream = llm(
31
+ messages=messages,
32
+ max_tokens=max_tokens,
33
+ temperature=temperature,
34
+ top_p=top_p,
35
+ stream=True
36
+ )
37
+
38
+ for output in stream:
39
+ token = output["choices"][0]["text"]
40
  response += token
41
  yield response
42
 
43
+ # Create Gradio ChatInterface
 
 
 
44
  demo = gr.ChatInterface(
45
  respond,
46
  additional_inputs=[
 
57
  ],
58
  )
59
 
 
60
  if __name__ == "__main__":
61
+ demo.launch()