# app.py
import os
import gradio as gr
import torch
import requests
from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
from monai.networks.nets import DenseNet121
import torchxrayvision as xrv
# Configuration
DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY") # Set in Hugging Face secrets
DISCLAIMER = """
<div style="color: red; border: 2px solid red; padding: 15px; margin: 10px;">
⚠️ WARNING: This is a prototype demonstration only. NOT ACTUAL MEDICAL ADVICE.
DO NOT USE FOR REAL HEALTH DECISIONS. CONSULT LICENSED PROFESSIONALS.
</div>
"""

class MedicalAssistant:
    def __init__(self):
        # Medical imaging models
        self.medical_models = self._init_imaging_models()

        # Clinical text processing
        self.prescription_parser = pipeline(
            "token-classification",
            model="obi/deid_bert_i2b2",
            tokenizer="obi/deid_bert_i2b2"
        )

        # Safety systems
        self.safety_filter = pipeline(
            "text-classification",
            model="Hate-speech-CNERG/dehatebert-mono-english"
        )

    def _init_imaging_models(self):
        """Initialize medical imaging models"""
        return {
            "xray": xrv.models.DenseNet(weights="densenet121-res224-all"),
            "ct": DenseNet121(spatial_dims=3, in_channels=1, out_channels=14),
            "histo": torch.hub.load('pytorch/vision', 'resnet50', pretrained=True)
        }
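
    # Rough input expectations for the models above (per the upstream docs; hedged):
    #   - "xray":  torchxrayvision DenseNet, (N, 1, 224, 224) grayscale tensors
    #   - "ct":    MONAI DenseNet121 with spatial_dims=3, i.e. a 5-D volume (N, 1, D, H, W)
    #   - "histo": torchvision resnet50, (N, 3, 224, 224) ImageNet-normalized RGB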

    def query_deepseek(self, prompt: str):
        """Query DeepSeek medical research API"""
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {DEEPSEEK_API_KEY}"
        }
        payload = {
            "model": "deepseek-medical-1.0",
            "messages": [{
                "role": "user",
                "content": f"MEDICAL PROMPT: {prompt}\nRespond with latest research-supported information. Cite sources."
            }],
            "temperature": 0.2,
            "max_tokens": 500
        }
        try:
            response = requests.post(DEEPSEEK_API_URL, json=payload, headers=headers, timeout=60)
            response.raise_for_status()
            return response.json()['choices'][0]['message']['content']
        except Exception as e:
            return f"API Error: {str(e)}"

    def analyze_image(self, image_path: str, modality: str):
        """Medical image analysis"""
        try:
            img = self._preprocess_image(image_path, modality)
            if modality == "xray":
                output = self.medical_models["xray"](img)
                # argmax returns a tensor, so convert to int before indexing the pathology list
                return xrv.datasets.default_pathologies[int(torch.argmax(output))]
            elif modality == "ct":
                output = self.medical_models["ct"](img)
                return "CT analysis placeholder"
            elif modality == "histo":
                output = self.medical_models["histo"](img)
                return "Histopathology analysis placeholder"
            return f"Unsupported modality: {modality}"
        except Exception as e:
            return f"Image analysis error: {str(e)}"

    def parse_prescription(self, text: str):
        """Clinical text parsing"""
        entities = self.prescription_parser(text)
        # The entity labels filtered here must match the label set of the NER model in use
        return {
            "medications": [ent for ent in entities if ent['entity'] == 'MEDICATION'],
            "dosage": [ent for ent in entities if ent['entity'] == 'DOSAGE']
        }

    def generate_response(self, query: str, context: dict):
        """Generate safe, research-backed response"""
        # Construct enhanced prompt
        research_prompt = f"""
        Medical Query: {query}
        Context:
        - Image Findings: {context.get('image_analysis', 'N/A')}
        - Prescription Data: {context.get('prescription', 'N/A')}
        Requirements:
        1. Provide evidence-based medical information
        2. Cite recent research (post-2020 when possible)
        3. Include safety considerations
        4. Note confidence level
        """
        # Get DeepSeek research response
        raw_response = self.query_deepseek(research_prompt)

        # Apply safety filters
        if self._is_unsafe(raw_response):
            return "I cannot provide advice on this matter. Please consult a healthcare professional."
        return self._add_disclaimer(raw_response)

    def _is_unsafe(self, text: str):
        """Content safety check"""
        return self.safety_filter(text)[0]['label'] == 'HATE'

    def _add_disclaimer(self, text: str):
        """Add legal disclaimer to response"""
        return f"{text}\n\n---\n⚠️ This information is for research purposes only. Not medical advice."

    def _preprocess_image(self, image_path: str, modality: str):
        """Image preprocessing placeholder"""
        # Dummy tensor shaped (batch, channel, height, width) for the 2-D models
        return torch.rand((1, 1, 224, 224))
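
    # A minimal sketch of what real chest X-ray preprocessing could look like, using
    # torchxrayvision's helpers. Illustrative only and not called anywhere; it assumes
    # scikit-image and torchvision are installed alongside torchxrayvision.
    def _preprocess_xray_sketch(self, image_path: str):
        import skimage.io
        import torchvision

        img = skimage.io.imread(image_path)
        img = xrv.datasets.normalize(img, 255)   # rescale to the range xrv models expect
        if img.ndim == 3:
            img = img.mean(2)                    # collapse RGB to grayscale
        img = img[None, ...]                     # add a channel dimension -> (1, H, W)
        transform = torchvision.transforms.Compose([
            xrv.datasets.XRayCenterCrop(),
            xrv.datasets.XRayResizer(224),
        ])
        img = transform(img)
        return torch.from_numpy(img).unsqueeze(0).float()  # (1, 1, 224, 224)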

# Initialize system
assistant = MedicalAssistant()

def process_input(query, image, prescription):
    context = {}
    if image is not None:
        context["image_analysis"] = assistant.analyze_image(image, "xray")
    if prescription:
        context["prescription"] = assistant.parse_prescription(prescription)
    return assistant.generate_response(query, context)
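
# Example call for a quick local check (illustrative only; requires DEEPSEEK_API_KEY to be set):
#   process_input("Possible causes of persistent dry cough?", None, "Amoxicillin 500 mg TID")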

# Gradio interface
interface = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Textbox(label="Medical Query", placeholder="Enter your medical question..."),
        gr.Image(label="Medical Imaging", type="filepath"),
        gr.Textbox(label="Prescription Text")
    ],
    outputs=gr.Textbox(label="Research-Backed Response"),
    title="AI Medical Research Assistant",
    description=DISCLAIMER,
    allow_flagging="never"
)

interface.launch()