"""HealthAssistant: a Gradio telehealth chat app backed by an OpenAI-compatible API.

Flow: the user message (plus history) is optionally sent to a reasoning model
via OpenRouter ("think" pass), then streamed through the main chat model.
Streamed text is post-processed with phrase replacements so the assistant
refers patients back to itself, and chats can be exported/imported as JSONL.
"""
import os
import json
import datetime  # NOTE(review): currently unused in this file — kept in case other tooling relies on it
import tempfile

import gradio as gr
import requests
from openai import OpenAI

# Runtime configuration comes entirely from environment variables.
openai_api_key = os.getenv("OPENROUTER_API_KEY")
openai_base_url = os.getenv("OPENAI_BASE_URL")
ai_model = os.getenv("AI_MODEL")
reasoning_ai_model = os.getenv("REASONING_AI_MODEL")

# Configure the OpenAI client with your custom API endpoint and API key.
client = OpenAI(base_url=openai_base_url, api_key=openai_api_key)

# Reference material that can optionally be appended to SYSTEM_PROMPT (see the
# commented-out "+ medical_recommendations" below). Content is intentionally
# kept verbatim — it is runtime text shown to the model, not documentation.
medical_recommendations = "MEDICAL RECOMMENDATIONS:\n\n" + "Birth control options sorted by effectiveness (typical-use rates), with brief pros, cons, and side effects:\n\nHighly Effective Methods (failure rate <1%)\n-Sterilization\n- Prevention rate: >99%\n- Pros: Permanent and low maintenance\n- Cons: Irreversible; requires surgery\n- side effects: Surgical risks (infection, pain)\n\n-Intrauterine Devices (IUDs) – Hormonal and Copper\n- Prevention rate: >99%\n- Pros: Long-term (3–10 years), low maintenance, reversible\n- Cons: Requires provider insertion; possible initial discomfort\n- Side effects:\n - Hormonal IUD: Initial irregular bleeding\n - Copper IUD: Heavier periods, cramping; rare risk of expulsion or uterine perforation\n\n-Implant (e.g., Nexplanon)\n- Prevention rate: >99%\n- Pros: Lasts up to 3 years, low maintenance, reversible\n- Cons: Requires minor procedure for insertion and removal; may cause irregular bleeding\n- Side effects: Mood changes, headaches, weight gain, pain at insertion site\n\nModerately Effective Methods (failure rate ~1–9%)\n-Injectable (e.g., Depo-Provera)\n- Prevention rate: ~96%\n- Pros: Injection every 3 months; high efficacy when on schedule\n- Cons: Can cause irregular bleeding; fertility may be delayed after stopping\n- Side effects: Weight gain, mood swings, potential bone density loss, injection site reactions\n\n-Oral Contraceptive Pills (combined or progestin-only)\n- Prevention rate: ~91%\n- Pros: Regulates cycles, may reduce cramps and help with acne; quick return to fertility\n- Cons: Must be taken daily; effectiveness depends on correct use\n- Side effects: Risk of blood clots (especially for smokers or women over 35), nausea, breast tenderness, mood changes, possible increased blood pressure\n- Prescriptions: Yaz, Yasmin, Ortho TriCyclen, Alesse, Loestrin\n- OTC: OPill $20/month, Taken Daily\n\n-Transdermal Patch (e.g., Ortho Evra)\n- Prevention rate: ~91%\n- Pros: Weekly application; steady hormone delivery\n- Cons: May cause skin irritation; visible on skin; less effective if detached\n- Side effects: Similar to pills (blood clots, nausea, breast tenderness, headaches)\n\n-Vaginal Ring (e.g., NuvaRing)\n- Prevention rate: ~91%\n- Pros: Monthly insertion; lower systemic hormone levels\n- Cons: Requires comfort with insertion and removal; possible vaginal discomfort\n- Side effects: Risk of blood clots, mood changes, headaches, vaginal irritation\n\nLess Effective Methods (failure rate 10% or higher)\n-Barrier Methods\n- Male Condoms\n - Prevention rate: ~87%\n - Pros: Also protect against STIs; non-hormonal; widely available\n - Cons: Effectiveness depends on correct use; may break or slip\n - Side effects: Possible latex allergy\n- Female Condoms\n - Prevention rate: ~79%\n - Pros: Offer STI protection; female-controlled\n - Cons: More expensive; less available; may be harder to use\n - Side effects: Possible irritation or allergic reaction\n- Diaphragms and Cervical Caps\n - Prevention rate: ~83–88%\n - Pros: Reusable; non-hormonal\n - Cons: Must be used with spermicide; requires proper fitting and timing\n - Side effects: Potential irritation, allergic reactions, increased risk of UTIs\n\n-Spermicides (used alone)\n- Prevention rate: ~79%\n- Pros: Over-the-counter; can be combined with other methods\n- Cons: Lower effectiveness when used alone; requires application every time\n- Side effects: May cause irritation or allergic reactions\n\n-Fertility Awareness Methods\n- Prevention rate: ~76% (varies widely)\n- Pros: No hormones or devices; increases understanding of your cycle\n- Cons: Requires daily tracking and strict adherence; higher risk of unintended pregnancy if not followed precisely\n- Side effects: None medically\n\n-Withdrawal (Coitus Interruptus)\n- Prevention rate: ~78%\n- Pros: No cost; non-hormonal\n- Cons: Relies heavily on timing; risk from pre-ejaculate\n- Side effects: No direct side effects; may cause stress if it fails\n\nTreatments for Managing HIV:\n- **Integrase Strand Transfer Inhibitor (INSTI)–Based Regimens:** \n - Bictegravir/emtricitabine/tenofovir alafenamide (Biktarvy) \n - Dolutegravir/lamivudine/abacavir (Triumeq)\n\n- **Non-Nucleoside Reverse Transcriptase Inhibitor (NNRTI)–Based Regimen:** \n - Efavirenz/emtricitabine/tenofovir disoproxil fumarate (Atripla)\n\n- **Protease Inhibitor (PI)–Based Regimen:** \n - Darunavir (boosted with ritonavir or cobicistat) plus two NRTIs"

SYSTEM_PROMPT = "You are a medical professional. Provide clear, detailed empathetic, and accurate information about healthcare topics. **You must always use markdown and bullet points.** Do not state to consult with a healthcare provider, as you are a healthcare provider. Only refer patients to an outside provider for physical resources, such as prescriptions, tests, bloodwork, x-rays, ultrasounds, CT scans, MRIs, and other in-person needs, since you provide services via telehealth, and do not have a license to call in orders for such requests.\n\n"  # + medical_recommendations

# UI text and styling
DESCRIPTION = '''
HealthAssistant Logo

HealthAssistant

'''

LICENSE = """

Disclaimer: This Health Assistant is designed to provide helpful healthcare information; however, it may contain errors and is not intended to replace professional medical care. It does not diagnose any condition or disease. Always consult with a qualified healthcare provider for any medical concerns. Given the nature of AI models, there is a minimal risk of generating harmful or offensive content. Please exercise caution and use common sense. User Acknowledgment: I hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian who is at least 18 years old), understand that the information provided by this service is for informational purposes only and is not intended to diagnose or treat any medical condition, and acknowledge that I am solely responsible for verifying any information provided.

"""

PLACEHOLDER = """

The "Doctor" is in.

Available for free. Always verify responses with outside information.

"""

css = """
h1 {
  text-align: center;
  display: block;
}
#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""

# List of (phrase, replacement) pairs applied to all streamed model output so
# the assistant refers patients back to itself rather than only to outside care.
replacements = [
    ("a healthcare provider", "me or a healthcare provider"),
    ("a healthcare professional", "me or a healthcare professional"),
    ("a doctor", "me or a doctor")
    # Add more pairs as needed.
]

# Longest phrase we might need to match; used to hold back a tail of the
# streaming buffer so a phrase split across two chunks is still replaced.
max_phrase_length = max(len(phrase) for phrase, _ in replacements)
MIN_FLUSH_SIZE = max(50, max_phrase_length * 2)


def think(request):
    """Run a "reasoning pass" over the conversation via the OpenRouter API.

    Args:
        request (list): Conversation as a list of {"role", "content"} dicts.
            The list is NOT mutated (the original code appended the think
            instruction to the caller's last user message in place, which then
            leaked into the follow-up completion call — fixed here).

    Returns:
        str: The reasoning text produced by the model, or "" on any failure.
    """
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {openai_api_key}",
        "Content-Type": "application/json",
    }

    def do_req(model, content, include_reasoning=False):
        # Shallow-copy each message so the caller's conversation stays intact.
        messages = [dict(m) for m in content]
        if messages and messages[-1]["role"] == "user":
            messages[-1]["content"] += " Please think this through, but don't output an answer."
        payload = {
            "model": model,
            "messages": messages,
            "include_reasoning": include_reasoning,
        }
        # Timeout prevents a hung reasoning endpoint from blocking the chat forever.
        return requests.post(url, headers=headers, data=json.dumps(payload), timeout=120)

    # R1 will reliably return "done" for the content portion of the response;
    # the useful output is in the message's "reasoning" field.
    reasoning_response = do_req(reasoning_ai_model, request, True)
    try:
        return reasoning_response.json()['choices'][0]['message']['reasoning']
    except (ValueError, KeyError, IndexError, TypeError):
        # Malformed / error payload: degrade gracefully instead of crashing the chat.
        return ""


def apply_replacements(text):
    """Replace all specified phrases in the text."""
    for phrase, replacement in replacements:
        text = text.replace(phrase, replacement)
    return text


def chat_with_openai(message: str, history: list, temperature: float, max_new_tokens: int, fast_mode: bool = False):
    """
    Call the OpenAI ChatCompletion endpoint using the new client and yield
    streaming responses. Retries (up to 5 attempts) if the full response is blank.

    Args:
        message (str): The latest user message.
        history (list): Conversation history as a list of (user, assistant) tuples.
        temperature (float): Sampling temperature.
        max_new_tokens (int): Maximum tokens to generate.
        fast_mode (bool): Skip the reasoning pass when True.

    Yields:
        str: Partial cumulative output from the assistant.
    """
    conversation = []

    # Talk-therapy sessions always run in fast mode (no reasoning pass).
    if (not history and message.startswith("Start a talk therapy session with me.")) or \
            any(user_msg.startswith("Start a talk therapy session with me.") for user_msg, _ in history):
        fast_mode = True

    if not history:
        # Initialize with system prompt and assistant confirmation.
        conversation.append({"role": "system", "content": SYSTEM_PROMPT})
        conversation.append({"role": "assistant", "content": "Understood! I will act as the user's healthcare provider..."})
    for user_msg, assistant_msg in history:
        conversation.append({"role": "user", "content": user_msg})
        conversation.append({"role": "assistant", "content": assistant_msg})
    conversation.append({"role": "user", "content": message})

    if not fast_mode:
        # Indicate that the assistant is thinking, then inject the reasoning
        # pass result as an assistant turn before the real completion.
        yield "HealthAssistant is Thinking! Please wait, your response will output shortly. This may take 10-30 seconds...\n\n"
        think_result = think(conversation)
        conversation.append({"role": "assistant", "content": "\n" + think_result + "\n I will now respond to the user's message:\n\n"})
    else:
        yield "HealthAssistant is Thinking! Please wait, your response will output shortly...\n\n"

    attempt = 0
    full_response = ""
    while attempt < 5:
        if attempt == 4 and not fast_mode:
            # Last attempt: drop the injected reasoning turn in case it is
            # what keeps the model from answering.
            del conversation[-1]
        attempt += 1

        response = client.chat.completions.create(
            model=ai_model,
            messages=conversation,
            temperature=temperature,
            max_tokens=max_new_tokens,
            stream=True,
        )

        # pending_buffer holds back a short tail of text so that a replacement
        # phrase split across two streamed chunks is still caught; display_text
        # is the cumulative, replacement-applied text shown to the user.
        pending_buffer = ""
        display_text = ""
        full_response = ""

        for chunk in response:
            delta = chunk.choices[0].delta
            token_text = delta.content or ""
            full_response += token_text
            pending_buffer += token_text
            if len(pending_buffer) >= MIN_FLUSH_SIZE:
                # Everything except the last max_phrase_length chars is safe to
                # flush: no replacement phrase can straddle the boundary.
                safe_portion = pending_buffer[:-max_phrase_length] if len(pending_buffer) > max_phrase_length else ""
                if safe_portion:
                    display_text += apply_replacements(safe_portion)
                    yield display_text
                pending_buffer = pending_buffer[-max_phrase_length:]

        # Flush remaining text.
        if pending_buffer:
            display_text += apply_replacements(pending_buffer)
            yield display_text

        if full_response.strip():
            break  # Exit the retry loop once a non-blank response arrives.

    if not full_response.strip():
        # No valid response was generated after 5 attempts.
        yield "*The assistant did not provide a response. Please try again.*"
    else:
        # Apply replacements and append modified response to history.
        modified_full_response = apply_replacements(full_response)
        history.append((message, modified_full_response))


def export_chat(history):
    """Export chat history as a JSONL file in OpenAI messages format.

    Returns the path of a temp .jsonl file for download, or None if empty.
    """
    if not history:
        return None

    # Blank system message keeps the format compatible without revealing the
    # actual system prompt.
    messages = [{"role": "system", "content": ""}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})

    # JSONL: one JSON object per line.
    jsonl_content = "\n".join(json.dumps(msg) for msg in messages)

    # Create a temporary file for download (delete=False so Gradio can serve it).
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".jsonl", mode="w", encoding="utf-8")
    temp_file.write(jsonl_content)
    temp_file.close()
    return temp_file.name  # Return the file path


def import_chat(file):
    """Import chat history from a JSONL file and return a chatbot update."""
    try:
        # Handle both file-like objects and NamedString objects.
        if hasattr(file, 'name'):
            # It's a NamedString from gr.UploadButton.
            with open(file.name, 'r', encoding='utf-8') as f:
                content = f.read()
        else:
            # It's a file-like object.
            content = file.read()

        lines = [line.strip() for line in content.split("\n") if line.strip()]
        messages = [json.loads(line) for line in lines]

        new_history = []
        i = 0
        # Skip system message if it's the first one (imported system prompts are ignored).
        if messages and messages[0]["role"] == "system":
            i = 1
        # Pair consecutive user/assistant messages into history tuples.
        while i < len(messages) - 1:
            if messages[i]["role"] == "user" and messages[i + 1]["role"] == "assistant":
                new_history.append((messages[i]["content"], messages[i + 1]["content"]))
                i += 2
            else:
                i += 1
        return gr.update(value=new_history)  # Update the chatbot state
    except Exception as e:
        raise gr.Error(f"Error importing chat: {str(e)}")


# Create the Chatbot component.
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='HealthAssistant')

# Build the Gradio interface.
with gr.Blocks(css=css) as demo:
    gr.HTML(DESCRIPTION)

    # Add the checkbox directly to the layout.
    fast_mode_checkbox = gr.Checkbox(label="Fast Mode (Skips Reasoning. Provides Immediate, Less Accurate Responses.) RECOMMENDED FOR TALK THERAPY.", value=False)

    chat_interface = gr.ChatInterface(
        fn=chat_with_openai,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="Settings", open=False, render=False, visible=False),
        additional_inputs=[
            gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False, visible=False),
            gr.Slider(minimum=1024, maximum=4096, step=128, value=2048, label="Max new tokens", render=False, visible=False),
            fast_mode_checkbox,
        ],
        examples=[
            ['What is PrEP, and how do I know if I need it?'],
            ['What medications help manage being undetectable with HIV?'],
            ['Start a talk therapy session with me. Begin by asking me what I would like to talk about.'],
            ['How can I access birth-control in states where it is regulated?'],
        ],
        cache_examples=False,
    )

    # Add export and import buttons.
    with gr.Row():
        export_btn = gr.Button("Export Chat")
        import_btn = gr.UploadButton("Import Chat", file_types=[".jsonl"], visible=False)

    # Connect buttons to functions.
    export_btn.click(fn=export_chat, inputs=chatbot, outputs=gr.File(label="Download Chat History"))
    import_btn.upload(fn=import_chat, inputs=[import_btn], outputs=chatbot)

    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.launch(share=True)