# File size: 5,805 Bytes
# 79989e7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# (removed: pasted line-number residue 1-161 from a copy of this file)
# app.py
import os
import gradio as gr
import torch
import requests
from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
from monai.networks.nets import DenseNet121
import torchxrayvision as xrv

# Configuration
DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")  # Set in Hugging Face secrets

DISCLAIMER = """
<div style="color: red; border: 2px solid red; padding: 15px; margin: 10px;">
⚠️ WARNING: This is a prototype demonstration only. NOT ACTUAL MEDICAL ADVICE. 
DO NOT USE FOR REAL HEALTH DECISIONS. CONSULT LICENSED PROFESSIONALS.
</div>
"""

class MedicalAssistant:
    """Prototype research assistant combining medical-imaging models,
    clinical-text parsing, and an external research LLM (DeepSeek).

    NOT a medical device: every response is passed through a content-safety
    classifier and carries a not-medical-advice disclaimer.
    """

    # Seconds to wait for the DeepSeek API before giving up. Added because
    # `requests.post` without a timeout can block the Gradio worker forever.
    REQUEST_TIMEOUT = 30

    def __init__(self):
        # Medical imaging models, one per supported modality key.
        self.medical_models = self._init_imaging_models()

        # Clinical text processing: token classifier used by
        # parse_prescription to pull entities out of free text.
        self.prescription_parser = pipeline(
            "token-classification", 
            model="obi/deid_bert_i2b2",
            tokenizer="obi/deid_bert_i2b2"
        )

        # Safety system: hate-speech classifier applied to every generated
        # response before it reaches the user (see _is_unsafe).
        self.safety_filter = pipeline(
            "text-classification", 
            model="Hate-speech-CNERG/dehatebert-mono-english"
        )

    def _init_imaging_models(self):
        """Initialize and return the per-modality imaging models."""
        return {
            "xray": xrv.models.DenseNet(weights="densenet121-res224-all"),
            "ct": DenseNet121(spatial_dims=3, in_channels=1, out_channels=14),
            "histo": torch.hub.load('pytorch/vision', 'resnet50', pretrained=True)
        }

    def query_deepseek(self, prompt: str) -> str:
        """Query the DeepSeek research API and return the reply text.

        Never raises: any failure (missing key, network error, unexpected
        payload) is returned as an "API Error: ..." string so the UI can
        display it directly.
        """
        if not DEEPSEEK_API_KEY:
            # Fail fast with a readable message instead of sending
            # "Authorization: Bearer None" and getting an opaque 401.
            return "API Error: DEEPSEEK_API_KEY is not configured"

        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {DEEPSEEK_API_KEY}"
        }
        
        payload = {
            "model": "deepseek-medical-1.0",
            "messages": [{
                "role": "user",
                "content": f"MEDICAL PROMPT: {prompt}\nRespond with latest research-supported information. Cite sources."
            }],
            "temperature": 0.2,  # low temperature: favor factual answers
            "max_tokens": 500
        }

        try:
            response = requests.post(
                DEEPSEEK_API_URL,
                json=payload,
                headers=headers,
                timeout=self.REQUEST_TIMEOUT,  # was missing: call could hang forever
            )
            response.raise_for_status()
            return response.json()['choices'][0]['message']['content']
        except Exception as e:
            return f"API Error: {str(e)}"

    def analyze_image(self, image_path: str, modality: str) -> str:
        """Run the model for *modality* on the image and describe the result.

        Returns a human-readable string; errors and unknown modalities are
        reported as strings rather than raised.
        """
        try:
            img = self._preprocess_image(image_path, modality)
            
            if modality == "xray":
                output = self.medical_models["xray"](img)
                return xrv.datasets.default_pathologies[torch.argmax(output)]
            elif modality == "ct":
                output = self.medical_models["ct"](img)
                return "CT analysis placeholder"
            elif modality == "histo":
                output = self.medical_models["histo"](img)
                return "Histopathology analysis placeholder"
            # Was missing: an unrecognized modality silently returned None.
            return f"Unsupported modality: {modality}"
        except Exception as e:
            return f"Image analysis error: {str(e)}"

    def parse_prescription(self, text: str) -> dict:
        """Extract medication and dosage entities from prescription text.

        NOTE(review): entity labels 'MEDICATION'/'DOSAGE' are assumed to be
        emitted by the deid_bert_i2b2 model — confirm against its label set.
        """
        entities = self.prescription_parser(text)
        return {
            "medications": [ent for ent in entities if ent['entity'] == 'MEDICATION'],
            "dosage": [ent for ent in entities if ent['entity'] == 'DOSAGE']
        }

    def generate_response(self, query: str, context: dict) -> str:
        """Generate a safe, research-backed response to *query*.

        *context* may carry 'image_analysis' and/or 'prescription' keys
        produced by analyze_image / parse_prescription.
        """
        # Construct enhanced prompt
        research_prompt = f"""
        Medical Query: {query}
        Context:
        - Image Findings: {context.get('image_analysis', 'N/A')}
        - Prescription Data: {context.get('prescription', 'N/A')}
        
        Requirements:
        1. Provide evidence-based medical information
        2. Cite recent research (post-2020 when possible)
        3. Include safety considerations
        4. Note confidence level
        """
        
        # Get DeepSeek research response
        raw_response = self.query_deepseek(research_prompt)
        
        # Refuse outright if the safety classifier flags the text.
        if self._is_unsafe(raw_response):
            return "I cannot provide advice on this matter. Please consult a healthcare professional."
            
        return self._add_disclaimer(raw_response)

    def _is_unsafe(self, text: str) -> bool:
        """True when the safety classifier labels *text* as hateful."""
        return self.safety_filter(text)[0]['label'] == 'HATE'

    def _add_disclaimer(self, text: str) -> str:
        """Append the not-medical-advice disclaimer to *text*."""
        return f"{text}\n\n---\n⚠️ This information is for research purposes only. Not medical advice."

    def _preprocess_image(self, image_path: str, modality: str):
        """Image preprocessing placeholder.

        NOTE(review): returns a random (1, 224, 224) tensor; the real
        per-modality loading/resizing/normalization is still TODO.
        """
        return torch.rand((1, 224, 224))

# Initialize system once at import time (loads all models; may take a while
# and downloads weights on first run).
assistant = MedicalAssistant()

def process_input(query, image, prescription):
    """Gradio callback: run optional analyses, then generate the answer.

    Only the inputs the user actually supplied are added to the context
    passed on to the assistant.
    """
    findings = {}

    # An uploaded image is analyzed as an X-ray — the only modality the
    # UI currently exposes.
    if image is not None:
        findings["image_analysis"] = assistant.analyze_image(image, "xray")

    # Free-text prescription, if provided, is parsed into entities.
    if prescription:
        findings["prescription"] = assistant.parse_prescription(prescription)

    return assistant.generate_response(query, findings)

# Gradio interface
interface = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Textbox(label="Medical Query", placeholder="Enter your medical question..."),
        gr.Image(label="Medical Imaging", type="filepath"),
        gr.Textbox(label="Prescription Text")
    ],
    outputs=gr.Textbox(label="Research-Backed Response"),
    title="AI Medical Research Assistant",
    description=DISCLAIMER,
    allow_flagging="never"
)

# Fix: stray prose ("make a requirements and readme file...") was fused onto
# this line, making the file a syntax error.
interface.launch()