reedmayhew committed
Commit 38bd2d4 · verified · 1 Parent(s): a49f925

Update app.py

Files changed (1): app.py +67 -98
app.py CHANGED
@@ -1,34 +1,24 @@
- import gradio as gr
  import os
- import spaces
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
- from threading import Thread
-
- # Set an environment variable
- HF_TOKEN = os.environ.get("HF_TOKEN", None)
-
- SYSTEM_PROMPT = "You are an AI Healthcare assistant. Provide clear, empathetic, and accurate information about healthcare topics. Do not state It's always best to consult with a healthcare provider, as you are simulating the role of a healthcare provider. A disclaimer has been placed elsewhere for the user that informs them to be mindful about your replies, so your job is to provide them with as much information as possible and let them make decisions themselves. Only refer them to an outside provider for physical resources, such as prescriptions, tests, bloodwork, x-rays, ultrasounds, CT scans, MRIs, and other similar things, since you are virtual."
+ import gradio as gr
+ from openai import OpenAI
+
+ # Configure the OpenAI client with your custom API endpoint and API key.
+ client = OpenAI(base_url="http://home.mayhew.cloud:1234/v1", api_key="lm-studio")
+
+ # UI text and styling
+ SYSTEM_PROMPT = "You are an assistant."
  DESCRIPTION = '''
  <div>
- <h1 style="text-align: center;">HealthAssistant</h1>
+ <h1 style="text-align: center;">HealthAssistant</h1>
  </div>
  '''
-
- LICENSE = """
- <p>
- This Health Assistant is designed to provide helpful healthcare information; however, it may make mistakes and is not designed to replace professional medical care. It is not intended to diagnose any condition or disease. Always consult with a qualified healthcare provider for any medical concerns.\n\nI hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian who is at least 18 years old), understand that the information provided by this service is for informational purposes only and is not intended to diagnose or treat any medical condition, and acknowledge that I am solely responsible for verifying any information provided.
- </p>
- """
-
+ LICENSE = "<p></p>"
  PLACEHOLDER = """
  <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
  <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">The "Doctor" is in.</h1>
  <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Available 1:00pm - 5:00pm EST</p>
  </div>
  """
-
  css = """
  h1 {
  text-align: center;
@@ -43,116 +33,95 @@ h1 {
  }
  """

- # Load the tokenizer and model
- tokenizer = AutoTokenizer.from_pretrained("reedmayhew/HealthCare-Reasoning-Assistant-Llama-3.1-8B-HF", device_map="cuda")
- model = AutoModelForCausalLM.from_pretrained("reedmayhew/HealthCare-Reasoning-Assistant-Llama-3.1-8B-HF", device_map="cuda")
-
- terminators = [
-     tokenizer.eos_token_id,
-     tokenizer.convert_tokens_to_ids("<|eot_id|>")
- ]
-
- @spaces.GPU(duration=60)
- def chat_llama3_8b(message: str,
-                    history: list,
-                    temperature: float,
-                    max_new_tokens: int
-                    ) -> str:
+ def chat_with_openai(message: str, history: list, temperature: float, max_new_tokens: int):
      """
-     Generate a streaming response using the llama3-8b model.
+     Call the OpenAI ChatCompletion endpoint using the new client and yield streaming responses.
+     Implements <think> logic:
+       - The assistant is forced to begin its answer with "<think> ".
+       - We then wait until a closing "</think>" marker is received.
+       - Only text after "</think>" is displayed as the final answer.
+
      Args:
-         message (str): The input message.
-         history (list): The conversation history used by ChatInterface.
-         temperature (float): The temperature for generating the response.
-         max_new_tokens (int): The maximum number of new tokens to generate.
-     Returns:
-         str: The generated response.
-     """
+         message (str): The latest user message.
+         history (list): Conversation history as a list of (user, assistant) tuples.
+         temperature (float): Sampling temperature.
+         max_new_tokens (int): Maximum tokens to generate.
+
+     Yields:
+         str: Partial cumulative output from the assistant.
+     """
      conversation = []
-
-     # Add system prompt only if this is a new conversation (i.e., history is empty)
      if not history:
+         # Add a system prompt and initial assistant confirmation.
          conversation.append({"role": "system", "content": SYSTEM_PROMPT})
-         conversation.append({"role": "assistant", "content": "Understood! I will act as the user's healthcare provider, not hesitate with providing detailed responses, or refer them to another healthcare provider unless they ask for one or need a physical resource, as you have already given them a disclaimer."})
-
-     for user, assistant in history:
-         conversation.extend([
-             {"role": "user", "content": user},
-             {"role": "assistant", "content": assistant}
-         ])
-
-     # Ensure the model starts with "<think>"
+         conversation.append({"role": "assistant", "content": "Understood!"})
+     for user_msg, assistant_msg in history:
+         conversation.append({"role": "user", "content": user_msg})
+         conversation.append({"role": "assistant", "content": assistant_msg})
      conversation.append({"role": "user", "content": message})
-     conversation.append({"role": "assistant", "content": "<think> "})  # Force <think> at start
-
-     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
-
-     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
-
-     generate_kwargs = dict(
-         input_ids=input_ids,
-         streamer=streamer,
-         max_new_tokens=max_new_tokens,
-         do_sample=True,
+     # Force the model to begin its answer with a "<think>" block.
+     conversation.append({"role": "assistant", "content": "<think> "})
+
+     full_response = ""  # Stores the raw assistant response (including the <think> block).
+     buffer = ""         # Accumulates tokens until we detect the closing </think>.
+     display_text = ""   # Holds text to display (only text after </think>).
+     think_detected = False
+
+     # Immediately yield a "thinking" status message.
+     yield "A.I. Healthcare is Thinking! Please wait, your response will output shortly...\n\n"
+
+     # Call the API with streaming enabled.
+     response = client.chat.completions.create(
+         model="model-identifier",  # Replace with your actual model identifier.
+         messages=conversation,
          temperature=temperature,
-         eos_token_id=terminators,
+         max_tokens=max_new_tokens,
+         stream=True,
      )
-
-     if temperature == 0:
-         generate_kwargs['do_sample'] = False
-
-     t = Thread(target=model.generate, kwargs=generate_kwargs)
-     t.start()
-
-     outputs = []
-     buffer = ""
-     think_detected = False
-     thinking_message_sent = False
-     full_response = ""  # Store the full assistant response
-
-     for text in streamer:
-         buffer += text
-         full_response += text  # Store raw assistant response (includes <think>)
-
-         # Send the "thinking" message once text starts generating
-         if not thinking_message_sent:
-             thinking_message_sent = True
-             yield "A.I. Healthcare is Thinking! Please wait, your response will output shortly...\n\n"
-
-         # Wait until </think> is detected before streaming output
+
+     # Process streaming responses.
+     for chunk in response:
+         # Extract the new token text from the chunk.
+         delta = chunk.choices[0].delta
+         token_text = delta.content or ""
+         full_response += token_text
+
          if not think_detected:
+             # Accumulate tokens until we see the closing </think> marker.
+             buffer += token_text
              if "</think>" in buffer:
                  think_detected = True
-                 buffer = buffer.split("</think>", 1)[1]  # Remove <think> section
+                 # Discard everything up to and including the "</think>" marker.
+                 display_text = buffer.split("</think>", 1)[1]
+                 yield display_text
          else:
-             outputs.append(text)
-             yield "".join(outputs)
+             display_text += token_text
+             yield display_text

-     # Store the full response (including <think>) in history, but only show the user the cleaned response
-     history.append((message, full_response))  # Full assistant response saved for context
+     # Append the full (raw) response, including the <think> section, to the conversation history.
+     history.append((message, full_response))

- # Gradio block
+ # Create the Chatbot component.
  chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='HealthAssistant')

- with gr.Blocks(fill_height=True, css=css) as demo:
-
+ # Build the Gradio interface.
+ with gr.Blocks(css=css) as demo:
      gr.Markdown(DESCRIPTION)
+
      gr.ChatInterface(
-         fn=chat_llama3_8b,
+         fn=chat_with_openai,
          chatbot=chatbot,
          fill_height=True,
          additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
          additional_inputs=[
-             gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False),
+             gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.6, label="Temperature", render=False),
              gr.Slider(minimum=1024, maximum=4096, step=128, value=2048, label="Max new tokens", render=False),
          ],
          examples=[
-             ['What is PrEP, and do I need it?'],
-             ['What medications help manage being undetectable with HIV?'],
-             ['How do I know if an abortion is the right option?'],
-             ['How can I access birth-control in states where it is regulated?'],
+             ['What is, and do I need it?'],
+             ['What medications help manage being invisible?'],
+             ['How do I know if a clown is the right option?'],
+             ['How can I access music in states where it is regulated?'],
          ],
          cache_examples=False,
      )
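
For readers skimming the diff: on a fresh chat, chat_with_openai now sends the messages below before anything is generated. The trailing assistant stub is what nudges the model to open with a <think> block. The user question here is a hypothetical example, not part of the commit:

```python
# First-turn message list built by chat_with_openai (history is empty).
conversation = [
    {"role": "system", "content": "You are an assistant."},      # SYSTEM_PROMPT
    {"role": "assistant", "content": "Understood!"},             # canned confirmation
    {"role": "user", "content": "What helps a mild headache?"},  # hypothetical user message
    {"role": "assistant", "content": "<think> "},                # forces a <think> opening
]
```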
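The streaming loop gates output on the closing </think> marker. A minimal, self-contained sketch of that gating, assuming a hypothetical in-memory token list in place of the API chunks (filter_think and the sample tokens are illustrative, not part of the commit):

```python
def filter_think(tokens):
    """Yield cumulative display text, suppressing everything before </think>."""
    buffer, display_text, think_detected = "", "", False
    for token_text in tokens:
        if not think_detected:
            buffer += token_text  # accumulate until the marker arrives
            if "</think>" in buffer:
                think_detected = True
                display_text = buffer.split("</think>", 1)[1]  # keep only the tail
                yield display_text
        else:
            display_text += token_text
            yield display_text

print(list(filter_think(["<think> weigh", " options</think>All", " set."])))
# -> ['All', 'All set.']
```

Each yield is the full message so far, which matches the cumulative-string shape gr.ChatInterface expects when streaming.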
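The new code path also assumes an OpenAI-compatible server (LM Studio in this commit) is reachable at the configured base_url. A quick smoke test outside Gradio, reusing the commit's placeholder model identifier; this is a sketch and assumes such a server is actually running:

```python
from openai import OpenAI

client = OpenAI(base_url="http://home.mayhew.cloud:1234/v1", api_key="lm-studio")

# Stream one completion and print tokens as they arrive.
stream = client.chat.completions.create(
    model="model-identifier",  # placeholder, as in the commit
    messages=[{"role": "user", "content": "Say hello."}],
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="", flush=True)
print()
```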