yogies committed
Commit 859ac01 · verified · 1 Parent(s): de611b5

Update app.py

Files changed (1)
  1. app.py +27 -21
app.py CHANGED
@@ -3,16 +3,20 @@ import gradio as gr
 from huggingface_hub import InferenceClient

 # ----------------------------------------------------------------------
-# Helper: read a secret with a safe fallback (useful when you run the
-# script locally without a secrets file).
+# 1️⃣ Force the UI language (prevents the “svelte‑i18n” error)
+# ----------------------------------------------------------------------
+gr.set_default_language("en")  # English UI – change if you need another locale
+
+# ----------------------------------------------------------------------
+# Helper to read a secret (with a safe fallback for local testing)
 # ----------------------------------------------------------------------
-def _secret(key: str, fallback: str) -> str:
+def _secret(key: str, fallback: str = "") -> str:
     """Return the value of a secret or the supplied fallback."""
     return os.getenv(key, fallback)


 # ----------------------------------------------------------------------
-# Core chat logic – the system prompt now comes from the secret `prec_chat`.
+# 2️⃣ Core chat logic – system prompt comes from the secret `prec_chat`
 # ----------------------------------------------------------------------
 def respond(
     message: str,
@@ -28,18 +32,18 @@ def respond(
     The system prompt is taken from the secret **prec_chat**.
     Users cannot edit it from the UI.
     """
-    # 1️⃣ Load the system prompt (fallback = generic assistant)
+    # Load the system prompt (fallback = generic assistant)
     system_message = _secret("prec_chat", "You are a helpful assistant.")

-    # 2️⃣ Initialise the HF inference client.
+    # Initialise the HF inference client
     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")

-    # 3️⃣ Build the message list for the chat completion endpoint.
+    # Build the message list for the chat completion endpoint
     messages = [{"role": "system", "content": system_message}]
-    messages.extend(history)                                # previous conversation
-    messages.append({"role": "user", "content": message})  # current query
+    messages.extend(history)                                # previous conversation turns
+    messages.append({"role": "user", "content": message})  # current user query

-    # 4️⃣ Stream the response back to the UI.
+    # Stream the response back to the UI
     response = ""
     for chunk in client.chat_completion(
         messages,
@@ -57,7 +61,7 @@ def respond(


 # ----------------------------------------------------------------------
-# UI definition – the system‑prompt textbox has been removed.
+# 3️⃣ UI – the system‑prompt textbox has been removed.
 # ----------------------------------------------------------------------
 chatbot = gr.ChatInterface(
     respond,
@@ -77,30 +81,32 @@ chatbot = gr.ChatInterface(
 )

 # ----------------------------------------------------------------------
-# Build the Blocks layout (no LoginButton – we use our own auth).
+# 4️⃣ Assemble the Blocks layout (no LoginButton – we use basic auth)
 # ----------------------------------------------------------------------
 with gr.Blocks() as demo:
     chatbot.render()


 # ----------------------------------------------------------------------
-# Launch with **basic authentication**.
+# 5️⃣ Launch – protect the UI with the credentials from secrets.
 # ----------------------------------------------------------------------
 if __name__ == "__main__":
-    # Pull the allowed credentials from secrets (fallback = no access)
-    allowed_user = _secret("CHAT_USER", "")
-    allowed_pass = _secret("CHAT_PASS", "")
+    # Pull the allowed credentials from secrets (raise early if they are missing)
+    allowed_user = _secret("CHAT_USER")
+    allowed_pass = _secret("CHAT_PASS")

-    # If either is missing we refuse to start – this prevents an accidental
-    # open‑access deployment.
     if not allowed_user or not allowed_pass:
         raise RuntimeError(
             "Authentication credentials not found in secrets. "
-            "Add CHAT_USER and CHAT_PASS to secrets.toml."
+            "Add CHAT_USER and CHAT_PASS to secrets.toml (or via the HF Spaces UI)."
         )

     demo.launch(
         auth=(allowed_user, allowed_pass),  # <-- Gradio's built‑in basic auth
-        # optional: you can also set `auth_message="Please log in"` or
-        # `prevent_thread_lock=True` depending on your deployment.
+        # In a remote environment (HF Spaces, Docker, cloud VM) you need a shareable link:
+        share=True,            # <-- remove if you run locally and can reach http://0.0.0.0:7860
+        # Optional – makes the server listen on all interfaces (useful in containers)
+        server_name="0.0.0.0",
+        # Optional – you can set a custom title, favicon, etc.
+        # title="Secure Chatbot",
     )
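
The second hunk is cut off inside the streaming call, so the loop body is not shown in this commit view. For reference, a minimal standalone sketch of how such a loop is usually written with huggingface_hub's InferenceClient – the HF_TOKEN variable, the max_tokens value and the example prompt below are placeholders, not values taken from this commit:

import os
from huggingface_hub import InferenceClient

# Placeholder token and model – substitute your own values.
client = InferenceClient(token=os.getenv("HF_TOKEN"), model="openai/gpt-oss-20b")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# With stream=True, chat_completion yields chunks; each chunk carries the newly
# generated text in choices[0].delta.content (which may be None on the last chunk).
response = ""
for chunk in client.chat_completion(messages, max_tokens=256, stream=True):
    piece = chunk.choices[0].delta.content or ""
    response += piece
    print(piece, end="", flush=True)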
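
To try the launch block locally without a Spaces secrets store, the credentials can be supplied as plain environment variables, since _secret is just a wrapper around os.getenv. The variable names below mirror the ones the commit reads; the values are dummies chosen for illustration:

import os

# Dummy credentials for a local run – on HF Spaces they come from the
# repository secrets, which are exposed to the app as environment variables.
os.environ["CHAT_USER"] = "demo"
os.environ["CHAT_PASS"] = "change-me"
os.environ["prec_chat"] = "You are a helpful assistant."

# With these set, `python app.py` starts Gradio and prompts for the basic-auth
# credentials; leaving either variable unset triggers the RuntimeError above.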