budikomarudin committed on
Commit
a937de5
·
verified ·
1 Parent(s): 47123c3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -45
app.py CHANGED
@@ -2,59 +2,49 @@ import gradio as gr
2
  import os
3
  from langchain_openai import ChatOpenAI
4
 
5
- # Set up the SambaNova Chat API client
6
  api_key = os.environ.get("FEATHERLESS_API_KEY")
7
- llm = ChatOpenAI(
8
- base_url="https://api.featherless.ai/v1/",
9
- api_key=api_key,
10
- streaming=True,
11
- model="mistralai/Magistral-Small-2506",
12
- )
13
 
14
- def respond(
15
- message,
16
- history: list[tuple[str, str]],
17
- system_message,
18
- max_tokens,
19
- temperature,
20
- top_p,
21
- ):
22
- messages = [{"role": "system", "content": system_message}]
23
-
24
- for val in history:
25
- if val[0]:
26
- messages.append({"role": "user", "content": val[0]})
27
- if val[1]:
28
- messages.append({"role": "assistant", "content": val[1]})
29
-
30
- messages.append({"role": "user", "content": message})
31
-
32
- # Update the ChatOpenAI instance with the current parameters
 
 
 
 
 
 
33
  llm.max_tokens = max_tokens
34
  llm.temperature = temperature
35
  llm.top_p = top_p
36
-
37
  response = ""
38
  for chunk in llm.stream(messages):
39
- token = chunk.content
40
- response += token
41
  yield response
42
 
43
- demo = gr.ChatInterface(
44
- respond,
45
- additional_inputs=[
46
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
47
- gr.Slider(minimum=1, maximum=16384, value=2048, step=1, label="Max new tokens"),
48
- gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
49
- gr.Slider(
50
- minimum=0.1,
51
- maximum=1.0,
52
- value=0.95,
53
- step=0.05,
54
- label="Top-p (nucleus sampling)",
55
- ),
56
- ],
57
- )
58
 
59
- if __name__ == "__main__":
60
  demo.launch()
 
2
  import os
3
  from langchain_openai import ChatOpenAI
4
 
 
5
  api_key = os.environ.get("FEATHERLESS_API_KEY")
 
 
 
 
 
 
6
 
7
# Models served by the Featherless API; the first entry is the UI default
# (the dropdown below is initialised with MODEL_CHOICES[0]).
MODEL_CHOICES = [
    "Qwen/Qwen3-32B",
    "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
    "meta-llama/Llama-3.3-70B-Instruct",
    "mistralai/Magistral-Small-2506",
    "unsloth/DeepSeek-R1-Distill-Llama-70B",
    "unsloth/Qwen2.5-72B-Instruct",
    "unsloth/Llama-3.3-70B-Instruct",
]
16
+
17
def create_llm(model_name: str, *, max_tokens=None, temperature=None, top_p=None):
    """Build a streaming ChatOpenAI client for the Featherless endpoint.

    Args:
        model_name: Model identifier understood by the Featherless API
            (see MODEL_CHOICES).
        max_tokens / temperature / top_p: Optional sampling settings. When
            None they are omitted so the API defaults apply — this keeps the
            original single-argument call ``create_llm(name)`` working.

    Returns:
        A ChatOpenAI instance with streaming enabled.
    """
    kwargs = {
        "base_url": "https://api.featherless.ai/v1/",
        "api_key": api_key,  # read from FEATHERLESS_API_KEY at module load
        "streaming": True,
        "model": model_name,
    }
    # Only forward sampling settings the caller actually supplied.
    if max_tokens is not None:
        kwargs["max_tokens"] = max_tokens
    if temperature is not None:
        kwargs["temperature"] = temperature
    if top_p is not None:
        kwargs["top_p"] = top_p
    return ChatOpenAI(**kwargs)
24
+
25
def respond(message, history, system_message, max_tokens, temperature, top_p, model_name):
    """Stream a chat completion, yielding the accumulated reply text.

    Args:
        message: The latest user message.
        history: List of (user, assistant) pairs; either side may be empty.
        system_message: System prompt placed first in the conversation.
        max_tokens / temperature / top_p: Sampling settings for this request.
        model_name: Featherless model identifier to query.

    Yields:
        The response text so far, once per received stream chunk.
    """
    llm = create_llm(model_name)
    # Per-request sampling settings on the freshly built client.
    llm.max_tokens = max_tokens
    llm.temperature = temperature
    llm.top_p = top_p

    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    for chunk in llm.stream(messages):
        # Some stream chunks (role/metadata frames) carry no text; guard so
        # ``response += None`` cannot raise TypeError mid-stream.
        if chunk.content:
            response += chunk.content
        yield response
39
 
40
with gr.Blocks() as demo:
    # The dropdown value is passed through to respond() as its model_name
    # argument via additional_inputs below.
    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=MODEL_CHOICES, value=MODEL_CHOICES[0], label="Pilih model"
        )
    system_msg = gr.Textbox("You are a friendly Chatbot.", label="System message")
    max_t = gr.Slider(1, 16384, value=2048, step=1, label="Max new tokens")
    temp = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
    # Wire the chat UI to respond(). Previously the widgets were created but
    # never connected (the standalone gr.Chatbot had no submit handler and
    # model_dropdown.change was a no-op), so the app could never reply.
    gr.ChatInterface(
        respond,
        additional_inputs=[system_msg, max_t, temp, top_p, model_dropdown],
    )

if __name__ == "__main__":
    demo.launch()