yogies committed (verified)
Commit a00c633 · 1 Parent(s): 859ac01

Update app.py

Files changed (1)
  1. app.py +16 -22
app.py CHANGED
@@ -3,20 +3,14 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 
 # ----------------------------------------------------------------------
-# 1️⃣ Force the UI language (prevents the “svelte‑i18n” error)
-# ----------------------------------------------------------------------
-gr.set_default_language("en")   # English UI – change if you need another locale
-
-# ----------------------------------------------------------------------
-# Helper to read a secret (with a safe fallback for local testing)
+# Helper to read a secret (fallback is useful when you run locally)
 # ----------------------------------------------------------------------
 def _secret(key: str, fallback: str = "") -> str:
-    """Return the value of a secret or the supplied fallback."""
     return os.getenv(key, fallback)
 
 
 # ----------------------------------------------------------------------
-# 2️⃣ Core chat logic – system prompt comes from the secret `prec_chat`
+# Core chat logic – system prompt comes from the secret `prec_chat`
 # ----------------------------------------------------------------------
 def respond(
     message: str,
@@ -32,18 +26,18 @@ def respond(
     The system prompt is taken from the secret **prec_chat**.
     Users cannot edit it from the UI.
     """
-    # Load the system prompt (fallback = generic assistant)
+    # 1️⃣ Load the system prompt (fallback = generic assistant)
     system_message = _secret("prec_chat", "You are a helpful assistant.")
 
-    # Initialise the HF inference client
+    # 2️⃣ Initialise the HF inference client
     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
 
-    # Build the message list for the chat‑completion endpoint
+    # 3️⃣ Build the message list for the chat‑completion endpoint
     messages = [{"role": "system", "content": system_message}]
-    messages.extend(history)                                # previous conversation turns
-    messages.append({"role": "user", "content": message})   # current user query
+    messages.extend(history)                                # previous turns
+    messages.append({"role": "user", "content": message})   # current query
 
-    # Stream the response back to the UI
+    # 4️⃣ Stream the response back to the UI
     response = ""
     for chunk in client.chat_completion(
         messages,
@@ -61,7 +55,7 @@ def respond(
 
 
 # ----------------------------------------------------------------------
-# 3️⃣ UI – the system‑prompt textbox has been removed.
+# UI – the system‑prompt textbox has been removed.
 # ----------------------------------------------------------------------
 chatbot = gr.ChatInterface(
     respond,
@@ -81,17 +75,17 @@ chatbot = gr.ChatInterface(
 )
 
 # ----------------------------------------------------------------------
-# 4️⃣ Assemble the Blocks layout (no LoginButton – we use basic auth)
+# Assemble the Blocks layout (no LoginButton – we use basic auth)
 # ----------------------------------------------------------------------
 with gr.Blocks() as demo:
     chatbot.render()
 
 
 # ----------------------------------------------------------------------
-# 5️⃣ Launch – protect the UI with the credentials from secrets.
+# Launch – protect the UI with the credentials from secrets.
 # ----------------------------------------------------------------------
 if __name__ == "__main__":
-    # Pull the allowed credentials from secrets (raise early if they are missing)
+    # Pull the allowed credentials from secrets (fail fast if missing)
     allowed_user = _secret("CHAT_USER")
     allowed_pass = _secret("CHAT_PASS")
 
@@ -103,10 +97,10 @@ if __name__ == "__main__":
 
     demo.launch(
         auth=(allowed_user, allowed_pass),   # <-- Gradio's built‑in basic auth
+        # Turn off server‑side rendering to avoid the i18n locale error
+        ssr_mode=False,
         # In a remote environment (HF Spaces, Docker, cloud VM) you need a shareable link:
-        share=True,             # <-- remove if you run locally and can reach http://0.0.0.0:7860
-        # Optional – makes the server listen on all interfaces (useful in containers)
+        share=True,             # <-- remove if you run locally and can reach http://127.0.0.1:7860
+        # Optional – listen on all interfaces (useful inside containers)
         server_name="0.0.0.0",
-        # Optional – you can set a custom title, favicon, etc.
-        # title="Secure Chatbot",
     )
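
For reference, a minimal sketch of the launch logic after this commit. It assumes the CHAT_USER and CHAT_PASS Space secrets are set; the credential check sits in a collapsed part of the diff, so the fail-fast guard below is an assumption, and the real ChatInterface wiring is replaced by a placeholder here.

import os
import gradio as gr

def _secret(key: str, fallback: str = "") -> str:
    # Read a secret from the environment, with a fallback for local runs.
    return os.getenv(key, fallback)

with gr.Blocks() as demo:
    # Placeholder: the actual app renders the ChatInterface here.
    gr.Markdown("chatbot goes here")

if __name__ == "__main__":
    allowed_user = _secret("CHAT_USER")
    allowed_pass = _secret("CHAT_PASS")
    if not allowed_user or not allowed_pass:
        # Assumed guard, matching the "fail fast if missing" comment in the diff.
        raise RuntimeError("CHAT_USER and CHAT_PASS secrets must be set")

    demo.launch(
        auth=(allowed_user, allowed_pass),  # Gradio's built-in basic auth
        ssr_mode=False,                     # replaces the removed gr.set_default_language() call
        share=True,                         # shareable link for remote environments
        server_name="0.0.0.0",              # listen on all interfaces (containers)
    )

The key change is that the i18n locale error is now handled by disabling server-side rendering at launch time rather than by the non-existent gr.set_default_language() helper that the old version tried to call at import time.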