Chris McMaster committed
Commit f32824f · Parent(s): 72f912f

Updates, improvements, new ADR features

Files changed:
- README.md +179 -8
- adr_analysis.py +954 -0
- app.py +252 -11
- brand_to_generic.py +17 -0
- caching.py +28 -3
- clinical_calculators.py +13 -2
- drug_data_endpoints.py +79 -34
- requirements.txt +1 -1
- utils.py +12 -3
README.md
CHANGED
@@ -6,15 +6,186 @@ colorTo: green

sdk: gradio
app_file: app.py
pinned: false
license: apache-2.0
tags:
  - pharmacology
  - clinical-pharmacy
  - healthcare
  - drug-information
  - calculator
  - gradio
---

# Pharmacist MCP Server

A comprehensive Model Context Protocol (MCP) server providing pharmaceutical and clinical decision support tools for healthcare applications.

## 🚀 Features

### Drug Information & Safety

- **Adverse Event Search**: Query FAERS database for drug safety reports
- **Drug Label Warnings**: FDA boxed warnings, contraindications, and interactions
- **Drug Recalls**: Recent FDA recall information
- **Pregnancy & Lactation**: Comprehensive safety information with fallback data
- **Dose Adjustments**: Renal and hepatic dosing guidance
- **LiverTox Integration**: Hepatotoxicity information from NIH database

### Advanced ADR Analysis (NEW!)

- **Enhanced FAERS Search**: Advanced filtering by demographics, seriousness, and outcomes
- **Naranjo Probability Scale**: Automated causality assessment for adverse drug reactions
- **Disproportionality Analysis**: Signal detection using PRR, ROR, and IC methods
- **Case Similarity Analysis**: Find similar adverse event cases
- **Temporal Analysis**: Time-to-onset patterns and reporting trends

### Clinical Calculators

- **Creatinine Clearance**: Cockcroft-Gault equation (worked sketch after this list)
- **eGFR**: CKD-EPI equation with CKD staging
- **Child-Pugh Score**: Liver function assessment
- **BMI Calculator**: Body mass index and weight categories
- **Ideal Body Weight**: Devine formula
- **Dosing Weight**: Recommendations for medication dosing
- **Drug Burden Index (DBI)**: Anticholinergic and sedative burden assessment
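
As a concrete illustration of the first calculator, here is a minimal standalone sketch of the Cockcroft-Gault equation (illustrative only, not the server's implementation; the server exposes it as `calculate_creatinine_clearance_mcp`):

```python
def cockcroft_gault(age_years: float, weight_kg: float, scr_mg_dl: float, is_female: bool) -> float:
    """Estimated creatinine clearance (mL/min) via the Cockcroft-Gault equation."""
    crcl = ((140 - age_years) * weight_kg) / (72 * scr_mg_dl)
    if is_female:
        crcl *= 0.85  # standard correction factor for female patients
    return round(crcl, 1)

# Same inputs the server's health check uses: 65 years, 70 kg, SCr 1.2 mg/dL, male
print(cockcroft_gault(65, 70, 1.2, False))  # 60.8 mL/min
```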

### Utility Functions

- **Brand to Generic Lookup**: International drug name conversion
- **Unit Conversions**: Creatinine units (mg/dL ↔ μmol/L); see the example after this list
- **Cache Management**: Performance optimization with TTL caching
- **Health Monitoring**: Server status and performance metrics
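
The creatinine conversion is a fixed scale factor (1 mg/dL ≈ 88.4 μmol/L); a minimal sketch of the arithmetic behind `convert_creatinine_units_mcp` (illustrative, not the server's code):

```python
CREATININE_MGDL_TO_UMOLL = 88.4  # 1 mg/dL of creatinine ≈ 88.4 μmol/L

def creatinine_to_umol_l(value_mg_dl: float) -> float:
    return round(value_mg_dl * CREATININE_MGDL_TO_UMOLL, 1)

def creatinine_to_mg_dl(value_umol_l: float) -> float:
    return round(value_umol_l / CREATININE_MGDL_TO_UMOLL, 2)

print(creatinine_to_umol_l(1.2))   # 106.1
print(creatinine_to_mg_dl(106.1))  # 1.2
```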

## 🧬 ADR Analysis Capabilities

### Pharmacovigilance Tools

The server now includes comprehensive adverse drug reaction (ADR) analysis tools suitable for clinical decision support and pharmacovigilance:

#### 1. Enhanced FAERS Search

```python
# Search with demographic filters
enhanced_faers_search(
    drug_name="warfarin",
    adverse_event="bleeding",
    age_range=">65",
    gender="2",  # female
    serious_only=True
)
```

#### 2. Naranjo Causality Assessment

```python
# Automated causality scoring
calculate_naranjo_score(
    adverse_reaction_after_drug="yes",
    reaction_improved_after_stopping="yes",
    # ... 10 standardized questions
)
# Returns: Score, probability (Definite/Probable/Possible/Doubtful)
```

#### 3. Signal Detection

```python
# Disproportionality analysis for signal detection
disproportionality_analysis(
    drug_name="aspirin",
    adverse_event="gastrointestinal bleeding"
)
# Returns: PRR, ROR, IC with confidence intervals
```
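
For reference, these three metrics are computed from a standard 2×2 contingency table (a = reports with the drug and the event, b = the drug with other events, c = other drugs with the event, d = everything else), exactly as implemented in `adr_analysis.py`:

$$
\mathrm{PRR}=\frac{a/(a+b)}{c/(c+d)},\qquad
\mathrm{ROR}=\frac{a\,d}{b\,c},\qquad
\mathrm{IC}=\log_2\frac{a}{E[a]},\quad E[a]=\frac{(a+b)(a+c)}{a+b+c+d}
$$

A signal is flagged when PRR or ROR is at least 2 with the lower 95% confidence bound above 1 (or the IC lower bound is above 0) and at least 3 cases are reported.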

## 📊 Performance Improvements

- **Reduced API timeouts**: 3-5s instead of 10s for better responsiveness
- **Enhanced error handling**: Graceful degradation with meaningful error messages
- **Improved caching**: Smart cache management with statistics (illustrative sketch after this list)
- **Input validation**: Comprehensive bounds checking and data validation
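
A minimal sketch of the TTL-caching idea behind the `with_caching` decorator (purely illustrative; the project's real implementation in `caching.py` also tracks the hit/miss statistics reported by `get_cache_stats_mcp`):

```python
import functools
import time

def with_caching(ttl: int = 1800):
    """Illustrative TTL cache decorator; not the project's caching.py implementation."""
    def decorator(func):
        cache = {}  # maps an argument key to (timestamp, result)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            key = (args, tuple(sorted(kwargs.items())))  # arguments must be hashable
            now = time.time()
            if key in cache:
                stored_at, result = cache[key]
                if now - stored_at < ttl:
                    return result  # fresh entry: cache hit
            result = func(*args, **kwargs)
            cache[key] = (now, result)
            return result

        return wrapper
    return decorator
```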

## 🔧 Installation

```bash
# Clone the repository
git clone <repository-url>
cd pharmacist_mcp

# Install dependencies
pip install -r requirements.txt

# Run the server
python app.py
```

## 📋 Requirements

- Python 3.8+
- Internet connection for API access
- Dependencies listed in `requirements.txt`

## 🧪 Testing

Run the comprehensive test suite:

```bash
# Test core ADR analysis algorithms
python test_disproportionality.py

# Test individual components
python -c "from adr_analysis import calculate_naranjo_score; print(calculate_naranjo_score(...))"
```
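
A fuller version of that one-liner, with all ten answers spelled out (the answer values are purely illustrative, and this assumes `with_error_handling` passes the function's return value through unchanged):

```python
from adr_analysis import calculate_naranjo_score

result = calculate_naranjo_score(
    adverse_reaction_after_drug="yes",
    reaction_improved_after_stopping="yes",
    reaction_reappeared_after_readministration="unknown",
    alternative_causes_exist="no",
    reaction_when_placebo_given="unknown",
    drug_detected_in_blood="unknown",
    reaction_worse_with_higher_dose="unknown",
    similar_reaction_to_drug_before="no",
    adverse_event_confirmed_objectively="yes",
    reaction_appeared_after_suspected_drug_given="yes",
)
# With these answers the raw scale gives 7, which maps to "Probable"
print(result["total_score"], result["category"])
```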

## 🔗 API Endpoints

### MCP Functions Available:

#### Drug Safety & Information

- `search_adverse_events_mcp(drug_name, limit)` (usage example after this list)
- `fetch_event_details_mcp(event_id)`
- `drug_label_warnings_mcp(drug_name)`
- `drug_recalls_mcp(drug_name, limit)`
- `drug_pregnancy_lactation_mcp(drug_name)`
- `drug_dose_adjustments_mcp(drug_name)`
- `drug_livertox_summary_mcp(drug_name)`
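
For example, the adverse-event search wrapper can be called directly from Python (a sketch: importing `app` builds the Gradio interfaces without launching them, the wrappers serialize their results to JSON strings via `format_json_output`, and the exact response fields come from `standardize_response`, so they are not assumed here):

```python
import json

from app import search_adverse_events_mcp

raw = search_adverse_events_mcp("warfarin", "5")  # the Gradio UI passes the limit as text
payload = json.loads(raw)                         # wrappers return JSON strings
print(list(payload.keys()))                       # inspect whichever top-level fields are returned
```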

#### ADR Analysis (NEW!)

- `enhanced_faers_search_mcp(drug_name, adverse_event, age_range, gender, serious_only, limit)`
- `calculate_naranjo_score_mcp(...10 parameters...)`
- `disproportionality_analysis_mcp(drug_name, adverse_event, background_limit)`
- `find_similar_cases_mcp(reference_case_id, similarity_threshold, limit)`
- `temporal_analysis_mcp(drug_name, adverse_event, limit)`

#### Clinical Calculators

- `calculate_creatinine_clearance_mcp(age, weight_kg, serum_creatinine, is_female)`
- `calculate_egfr_mcp(age, serum_creatinine, is_female, is_black)`
- `calculate_child_pugh_score_mcp(bilirubin, albumin, inr, ascites, encephalopathy)`
- `calculate_bmi_mcp(weight_kg, height_cm)`
- `calculate_ideal_body_weight_mcp(height_cm, is_male)`
- `recommend_dosing_weight_mcp(actual_weight, height_cm, is_male)`
- `calculate_drug_burden_index_mcp(drug_list, route)`

#### Utilities

- `brand_to_generic_lookup_mcp(brand_name, prefer_countries)`
- `convert_creatinine_units_mcp(value, from_unit, to_unit)`
- `get_cache_stats_mcp()`
- `health_check_mcp()`

## 📚 Data Sources

- **FAERS**: FDA Adverse Event Reporting System
- **FDA Drug Labels**: Official prescribing information
- **LiverTox**: NIH hepatotoxicity database
- **Drug Brand Database**: International brand name mappings

## ⚠️ Important Notes

- This tool is for informational purposes only
- Always consult official prescribing information
- Results should be interpreted by qualified healthcare professionals
- FAERS data has inherent limitations and reporting biases
- Pharmacovigilance analysis requires clinical expertise

adr_analysis.py
ADDED
@@ -0,0 +1,954 @@

```python
"""
Advanced Adverse Drug Reaction (ADR) Analysis Tools

This module provides comprehensive pharmacovigilance capabilities including:
- Enhanced FAERS database searches with filtering
- Naranjo probability scale calculator
- Disproportionality analysis (PRR, ROR, IC)
- Case similarity analysis
- Temporal pattern analysis
"""

import requests
import re
import math
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Tuple
from collections import defaultdict, Counter

from caching import with_caching
from utils import with_error_handling, make_api_request

logger = logging.getLogger(__name__)


@with_error_handling
@with_caching(ttl=1800)
def enhanced_faers_search(
    drug_name: str,
    adverse_event: str = None,
    age_range: str = None,
    gender: str = None,
    serious_only: bool = False,
    limit: int = 100
) -> Dict[str, Any]:
    """
    Enhanced FAERS search with filtering capabilities for pharmacovigilance analysis.

    Args:
        drug_name: Drug name to search for
        adverse_event: Specific adverse event/reaction to filter by (optional)
        age_range: Age range filter like "18-65" or ">65" (optional)
        gender: Gender filter "1" (male) or "2" (female) (optional)
        serious_only: If True, only return serious adverse events
        limit: Maximum number of results (default 100)

    Returns:
        Dict with enhanced case data including demographics, outcomes, and temporal info
    """
    if not drug_name or not drug_name.strip():
        raise ValueError("Drug name cannot be empty")

    # Build search query
    search_parts = [f'patient.drug.medicinalproduct:"{drug_name.strip()}"']

    if adverse_event:
        search_parts.append(f'patient.reaction.reactionmeddrapt:"{adverse_event.strip()}"')

    if serious_only:
        search_parts.append('serious:"1"')

    if gender in ["1", "2"]:
        search_parts.append(f'patient.patientsex:"{gender}"')

    search_query = " AND ".join(search_parts)

    base_url = "https://api.fda.gov/drug/event.json"
    query_params = {
        "search": search_query,
        "limit": min(max(1, limit), 1000)
    }

    response = make_api_request(base_url, query_params, timeout=15)

    if response.status_code != 200:
        if response.status_code == 404:
            return {
                "cases": [],
                "total_found": 0,
                "query_info": {
                    "drug": drug_name,
                    "adverse_event": adverse_event,
                    "filters_applied": {
                        "age_range": age_range,
                        "gender": gender,
                        "serious_only": serious_only
                    }
                },
                "message": "No matching cases found"
            }
        raise requests.exceptions.RequestException(f"Enhanced FAERS search failed: {response.status_code}")

    data = response.json()
    cases = []

    for rec in data.get("results", []):
        case = extract_case_details(rec, age_range)
        if case:  # Only include if age filter passes
            cases.append(case)

    # Calculate summary statistics
    summary_stats = calculate_case_statistics(cases)

    return {
        "cases": cases,
        "total_found": data.get("meta", {}).get("results", {}).get("total", 0),
        "filtered_count": len(cases),
        "query_info": {
            "drug": drug_name,
            "adverse_event": adverse_event,
            "filters_applied": {
                "age_range": age_range,
                "gender": gender,
                "serious_only": serious_only
            }
        },
        "summary_statistics": summary_stats
    }


def extract_case_details(rec: Dict, age_range: str = None) -> Optional[Dict]:
    """Extract and structure case details from FAERS record."""
    patient = rec.get("patient", {})

    # Extract patient demographics
    age = patient.get("patientagegroup")
    age_years = patient.get("patientage")
    gender = patient.get("patientsex")

    # Apply age filter if specified
    if age_range and age_years:
        try:
            age_num = float(age_years)
            if not passes_age_filter(age_num, age_range):
                return None
        except (ValueError, TypeError):
            pass

    # Extract drug information
    drugs = []
    for drug in patient.get("drug", []):
        drug_info = {
            "name": drug.get("medicinalproduct", ""),
            "characterization": drug.get("drugcharacterization"),  # 1=suspect, 2=concomitant, 3=interacting
            "indication": drug.get("drugindication", ""),
            "start_date": drug.get("drugstartdate", ""),
            "end_date": drug.get("drugenddate", ""),
            "dosage": drug.get("drugdosagetext", ""),
            "route": drug.get("drugadministrationroute", "")
        }
        drugs.append(drug_info)

    # Extract reactions
    reactions = []
    for reaction in patient.get("reaction", []):
        reaction_info = {
            "term": reaction.get("reactionmeddrapt", ""),
            "outcome": reaction.get("reactionoutcome")  # 1=recovered, 2=recovering, 3=not recovered, 4=recovered with sequelae, 5=fatal, 6=unknown
        }
        reactions.append(reaction_info)

    # Extract seriousness criteria
    seriousness = {
        "serious": bool(int(rec.get("serious", "0"))),
        "death": bool(int(rec.get("seriousnessdeath", "0"))),
        "life_threatening": bool(int(rec.get("seriousnesslifethreatening", "0"))),
        "hospitalization": bool(int(rec.get("seriousnesshospitalization", "0"))),
        "disability": bool(int(rec.get("seriousnessdisabling", "0"))),
        "congenital_anomaly": bool(int(rec.get("seriousnesscongenitalanomali", "0"))),
        "other_serious": bool(int(rec.get("seriousnessother", "0")))
    }

    return {
        "safety_report_id": rec.get("safetyreportid"),
        "receive_date": rec.get("receivedate"),
        "patient": {
            "age": age_years,
            "age_group": age,
            "gender": gender,  # 1=male, 2=female
            "weight": patient.get("patientweight")
        },
        "drugs": drugs,
        "reactions": reactions,
        "seriousness": seriousness,
        "reporter_qualification": rec.get("primarysource", {}).get("qualification"),  # 1=physician, 2=pharmacist, etc.
        "country": rec.get("occurcountry")
    }


def passes_age_filter(age: float, age_range: str) -> bool:
    """Check if age passes the specified filter."""
    age_range = age_range.strip()

    # Check two-character operators first so ">=65" is not parsed as ">" with "=65".
    if age_range.startswith(">="):
        threshold = float(age_range[2:])
        return age >= threshold
    elif age_range.startswith("<="):
        threshold = float(age_range[2:])
        return age <= threshold
    elif age_range.startswith(">"):
        threshold = float(age_range[1:])
        return age > threshold
    elif age_range.startswith("<"):
        threshold = float(age_range[1:])
        return age < threshold
    elif "-" in age_range:
        min_age, max_age = map(float, age_range.split("-"))
        return min_age <= age <= max_age

    return True


def calculate_case_statistics(cases: List[Dict]) -> Dict[str, Any]:
    """Calculate summary statistics from case data."""
    if not cases:
        return {}

    # Demographics
    ages = [float(case["patient"]["age"]) for case in cases if case["patient"]["age"]]
    genders = [case["patient"]["gender"] for case in cases if case["patient"]["gender"]]

    # Outcomes
    serious_cases = sum(1 for case in cases if case["seriousness"]["serious"])
    fatal_cases = sum(1 for case in cases if case["seriousness"]["death"])

    # Reporter types
    reporter_types = [case["reporter_qualification"] for case in cases if case["reporter_qualification"]]

    # Most common reactions
    all_reactions = []
    for case in cases:
        all_reactions.extend([r["term"] for r in case["reactions"]])
    reaction_counts = Counter(all_reactions)

    stats = {
        "total_cases": len(cases),
        "serious_cases": serious_cases,
        "serious_percentage": round(serious_cases / len(cases) * 100, 1),
        "fatal_cases": fatal_cases,
        "fatal_percentage": round(fatal_cases / len(cases) * 100, 1) if len(cases) > 0 else 0,
        "demographics": {
            "age_stats": {
                "mean": round(sum(ages) / len(ages), 1) if ages else None,
                "median": sorted(ages)[len(ages)//2] if ages else None,
                "range": [min(ages), max(ages)] if ages else None
            },
            "gender_distribution": dict(Counter(genders))
        },
        "top_reactions": dict(reaction_counts.most_common(10)),
        "reporter_types": dict(Counter(reporter_types))
    }

    return stats


@with_error_handling
def calculate_naranjo_score(
    adverse_reaction_after_drug: str,  # "yes", "no", "unknown"
    reaction_improved_after_stopping: str,  # "yes", "no", "unknown"
    reaction_reappeared_after_readministration: str,  # "yes", "no", "unknown"
    alternative_causes_exist: str,  # "yes", "no", "unknown"
    reaction_when_placebo_given: str,  # "yes", "no", "unknown"
    drug_detected_in_blood: str,  # "yes", "no", "unknown"
    reaction_worse_with_higher_dose: str,  # "yes", "no", "unknown"
    similar_reaction_to_drug_before: str,  # "yes", "no", "unknown"
    adverse_event_confirmed_objectively: str,  # "yes", "no", "unknown"
    reaction_appeared_after_suspected_drug_given: str  # "yes", "no", "unknown"
) -> Dict[str, Any]:
    """
    Calculate Naranjo Adverse Drug Reaction Probability Scale.

    The Naranjo scale helps determine the likelihood that an adverse event
    is related to drug therapy rather than other factors.

    Args:
        All parameters should be "yes", "no", or "unknown"

    Returns:
        Dict with score, probability category, and detailed breakdown
    """

    # Naranjo scoring system
    questions = [
        {
            "question": "Are there previous conclusive reports on this reaction?",
            "answer": adverse_reaction_after_drug,
            "scores": {"yes": 1, "no": 0, "unknown": 0}
        },
        {
            "question": "Did the adverse event appear after the suspected drug was administered?",
            "answer": reaction_appeared_after_suspected_drug_given,
            "scores": {"yes": 2, "no": -1, "unknown": 0}
        },
        {
            "question": "Did the adverse reaction improve when the drug was discontinued or a specific antagonist was administered?",
            "answer": reaction_improved_after_stopping,
            "scores": {"yes": 1, "no": 0, "unknown": 0}
        },
        {
            "question": "Did the adverse reaction reappear when the drug was readministered?",
            "answer": reaction_reappeared_after_readministration,
            "scores": {"yes": 2, "no": -1, "unknown": 0}
        },
        {
            "question": "Are there alternative causes (other than the drug) that could on their own have caused the reaction?",
            "answer": alternative_causes_exist,
            "scores": {"yes": -1, "no": 2, "unknown": 0}
        },
        {
            "question": "Did the reaction reappear when a placebo was given?",
            "answer": reaction_when_placebo_given,
            "scores": {"yes": -1, "no": 1, "unknown": 0}
        },
        {
            "question": "Was the drug detected in blood (or other fluids) in concentrations known to be toxic?",
            "answer": drug_detected_in_blood,
            "scores": {"yes": 1, "no": 0, "unknown": 0}
        },
        {
            "question": "Was the reaction more severe when the dose was increased or less severe when the dose was decreased?",
            "answer": reaction_worse_with_higher_dose,
            "scores": {"yes": 1, "no": 0, "unknown": 0}
        },
        {
            "question": "Did the patient have a similar reaction to the same or similar drugs in any previous exposure?",
            "answer": similar_reaction_to_drug_before,
            "scores": {"yes": 1, "no": 0, "unknown": 0}
        },
        {
            "question": "Was the adverse event confirmed by any objective evidence?",
            "answer": adverse_event_confirmed_objectively,
            "scores": {"yes": 1, "no": 0, "unknown": 0}
        }
    ]

    total_score = 0
    question_details = []

    for q in questions:
        answer = q["answer"].lower().strip()
        if answer not in q["scores"]:
            raise ValueError(f"Invalid answer '{answer}'. Must be 'yes', 'no', or 'unknown'")

        score = q["scores"][answer]
        total_score += score

        question_details.append({
            "question": q["question"],
            "answer": answer,
            "points": score
        })

    # Determine probability category
    if total_score >= 9:
        category = "Definite"
        probability = "≥95%"
        interpretation = "The adverse reaction is definitely related to the drug."
    elif total_score >= 5:
        category = "Probable"
        probability = "75-95%"
        interpretation = "The adverse reaction is probably related to the drug."
    elif total_score >= 1:
        category = "Possible"
        probability = "25-75%"
        interpretation = "The adverse reaction is possibly related to the drug."
    else:
        category = "Doubtful"
        probability = "<25%"
        interpretation = "The adverse reaction is doubtfully related to the drug."

    return {
        "total_score": total_score,
        "category": category,
        "probability": probability,
        "interpretation": interpretation,
        "question_breakdown": question_details,
        "scale_info": {
            "name": "Naranjo Adverse Drug Reaction Probability Scale",
            "reference": "Naranjo CA, et al. Clin Pharmacol Ther. 1981;30(2):239-245",
            "scoring": {
                "Definite": "≥9 points",
                "Probable": "5-8 points",
                "Possible": "1-4 points",
                "Doubtful": "≤0 points"
            }
        }
    }


@with_error_handling
@with_caching(ttl=3600)
def disproportionality_analysis(
    drug_name: str,
    adverse_event: str,
    background_limit: int = 10000
) -> Dict[str, Any]:
    """
    Perform disproportionality analysis to detect potential drug-adverse event signals.

    Calculates Proportional Reporting Ratio (PRR), Reporting Odds Ratio (ROR),
    and Information Component (IC) with confidence intervals.

    Args:
        drug_name: Drug of interest
        adverse_event: Adverse event of interest
        background_limit: Number of background cases to sample for comparison

    Returns:
        Dict with PRR, ROR, IC values and statistical significance
    """

    try:
        base_url = "https://api.fda.gov/drug/event.json"

        # Get cases for drug + adverse event (a)
        drug_ae_query = {
            "search": f'patient.drug.medicinalproduct:"{drug_name}" AND patient.reaction.reactionmeddrapt:"{adverse_event}"',
            "limit": 1
        }

        drug_ae_response = make_api_request(base_url, drug_ae_query, timeout=10)

        if drug_ae_response and drug_ae_response.status_code == 200:
            drug_ae_data = drug_ae_response.json()
            a = drug_ae_data.get("meta", {}).get("results", {}).get("total", 0)
        else:
            a = 0

        if a == 0:
            return {
                "drug": drug_name,
                "adverse_event": adverse_event,
                "message": "No cases found for this drug-adverse event combination",
                "signal_detected": False,
                "case_count": 0
            }

        # Get total cases for drug (a + b)
        drug_total_query = {
            "search": f'patient.drug.medicinalproduct:"{drug_name}"',
            "limit": 1
        }

        drug_total_response = make_api_request(base_url, drug_total_query, timeout=10)

        if drug_total_response and drug_total_response.status_code == 200:
            drug_total_data = drug_total_response.json()
            total_drug_cases = drug_total_data.get("meta", {}).get("results", {}).get("total", 0)
            b = max(total_drug_cases - a, 1)  # Ensure b is at least 1
        else:
            b = max(a * 5, 10)  # Conservative estimate

        # Get total cases for adverse event (a + c)
        ae_total_query = {
            "search": f'patient.reaction.reactionmeddrapt:"{adverse_event}"',
            "limit": 1
        }

        ae_total_response = make_api_request(base_url, ae_total_query, timeout=10)

        if ae_total_response and ae_total_response.status_code == 200:
            ae_total_data = ae_total_response.json()
            total_ae_cases = ae_total_data.get("meta", {}).get("results", {}).get("total", 0)
            c = max(total_ae_cases - a, 1)  # Avoid zero
        else:
            c = max(a * 10, 100)  # Conservative estimate

        # Estimate total background cases (d)
        # Use a reasonable estimate based on FAERS database size
        total_cases_estimate = 15000000  # Approximate FAERS database size
        d = max(total_cases_estimate - a - b - c, 1000)

        # Calculate disproportionality measures
        results = calculate_disproportionality_measures(a, b, c, d)

        # Add metadata
        results.update({
            "drug": drug_name,
            "adverse_event": adverse_event,
            "contingency_table": {
                "drug_ae": a,
                "drug_other_ae": b,
                "other_drug_ae": c,
                "other_drug_other_ae": d,
                "total": a + b + c + d
            },
            "data_sources": {
                "drug_ae_cases": "FAERS API direct query",
                "total_drug_cases": "FAERS API direct query",
                "total_ae_cases": "FAERS API direct query",
                "background_estimate": "Statistical approximation"
            },
            "data_notes": [
                "This analysis uses FAERS data which has inherent limitations",
                "Results should be interpreted by qualified pharmacovigilance professionals",
                "Background estimates are approximations due to API limitations",
                "Consider confounding factors and reporting biases"
            ]
        })

        return results

    except Exception as e:
        logger.error(f"Error in disproportionality analysis: {e}")
        return {
            "drug": drug_name,
            "adverse_event": adverse_event,
            "error": str(e),
            "message": "Analysis failed due to data access issues",
            "signal_detected": False,
            "case_count": 0
        }


def calculate_disproportionality_measures(a: int, b: int, c: int, d: int) -> Dict[str, Any]:
    """
    Calculate PRR, ROR, and IC with confidence intervals.

    2x2 contingency table:
                              AE of Interest    Other AEs
        Drug of Interest            a               b
        Other Drugs                 c               d
    """

    # Proportional Reporting Ratio (PRR)
    prr = (a / (a + b)) / (c / (c + d)) if (a + b) > 0 and (c + d) > 0 else 0

    # PRR 95% CI (using log transformation)
    if a > 0:
        log_prr = math.log(prr)
        se_log_prr = math.sqrt(1/a + 1/c - 1/(a+b) - 1/(c+d))
        prr_ci_lower = math.exp(log_prr - 1.96 * se_log_prr)
        prr_ci_upper = math.exp(log_prr + 1.96 * se_log_prr)
    else:
        prr_ci_lower = prr_ci_upper = 0

    # Reporting Odds Ratio (ROR)
    ror = (a * d) / (b * c) if b > 0 and c > 0 else 0

    # ROR 95% CI
    if a > 0 and b > 0 and c > 0 and d > 0:
        log_ror = math.log(ror)
        se_log_ror = math.sqrt(1/a + 1/b + 1/c + 1/d)
        ror_ci_lower = math.exp(log_ror - 1.96 * se_log_ror)
        ror_ci_upper = math.exp(log_ror + 1.96 * se_log_ror)
    else:
        ror_ci_lower = ror_ci_upper = 0

    # Information Component (IC)
    expected = ((a + b) * (a + c)) / (a + b + c + d)
    ic = math.log2(a / expected) if expected > 0 and a > 0 else 0

    # IC 95% CI (simplified approximation)
    if a > 0:
        ic_se = 1 / (math.log(2) * math.sqrt(a))
        ic_ci_lower = ic - 1.96 * ic_se
        ic_ci_upper = ic + 1.96 * ic_se
    else:
        ic_ci_lower = ic_ci_upper = 0

    # Signal detection criteria
    prr_signal = prr >= 2.0 and prr_ci_lower > 1.0 and a >= 3
    ror_signal = ror >= 2.0 and ror_ci_lower > 1.0 and a >= 3
    ic_signal = ic_ci_lower > 0 and a >= 3

    signal_detected = prr_signal or ror_signal or ic_signal

    return {
        "proportional_reporting_ratio": {
            "value": round(prr, 3),
            "confidence_interval_95": [round(prr_ci_lower, 3), round(prr_ci_upper, 3)],
            "signal_detected": prr_signal,
            "interpretation": "PRR ≥2 with lower CI >1 suggests potential signal" if prr_signal else "No signal detected by PRR criteria"
        },
        "reporting_odds_ratio": {
            "value": round(ror, 3),
            "confidence_interval_95": [round(ror_ci_lower, 3), round(ror_ci_upper, 3)],
            "signal_detected": ror_signal,
            "interpretation": "ROR ≥2 with lower CI >1 suggests potential signal" if ror_signal else "No signal detected by ROR criteria"
        },
        "information_component": {
            "value": round(ic, 3),
            "confidence_interval_95": [round(ic_ci_lower, 3), round(ic_ci_upper, 3)],
            "signal_detected": ic_signal,
            "interpretation": "IC lower CI >0 suggests potential signal" if ic_signal else "No signal detected by IC criteria"
        },
        "overall_signal_detected": signal_detected,
        "case_count": a,
        "signal_strength": "Strong" if (prr_signal and ror_signal and ic_signal) else
                           "Moderate" if signal_detected else "Weak/None"
    }


@with_error_handling
@with_caching(ttl=1800)
def find_similar_cases(
    reference_case_id: str,
    similarity_threshold: float = 0.7,
    limit: int = 50
) -> Dict[str, Any]:
    """
    Find cases similar to a reference case based on patient characteristics,
    drugs, and adverse events.

    Args:
        reference_case_id: FAERS safety report ID to use as reference
        similarity_threshold: Minimum similarity score (0-1)
        limit: Maximum number of similar cases to return

    Returns:
        Dict with similar cases and similarity scores
    """

    # First, get the reference case details
    from drug_data_endpoints import fetch_event_details

    try:
        ref_case = fetch_event_details(reference_case_id)
    except Exception as e:
        raise ValueError(f"Could not fetch reference case {reference_case_id}: {e}")

    ref_drugs = [drug.lower() for drug in ref_case["drugs"]]
    ref_reactions = [reaction.lower() for reaction in ref_case["reactions"]]

    if not ref_drugs:
        raise ValueError("Reference case has no drug information")

    # Search for cases with similar drugs
    primary_drug = ref_drugs[0] if ref_drugs else ""

    similar_cases_response = enhanced_faers_search(
        drug_name=primary_drug,
        limit=min(limit * 3, 500)  # Get more cases to filter
    )

    similar_cases = []

    for case in similar_cases_response["cases"]:
        case_drugs = [drug["name"].lower() for drug in case["drugs"] if drug["name"]]
        case_reactions = [reaction["term"].lower() for reaction in case["reactions"] if reaction["term"]]

        # Skip the reference case itself
        if case["safety_report_id"] == reference_case_id:
            continue

        # Calculate similarity score
        similarity_score = calculate_case_similarity(
            ref_drugs, ref_reactions,
            case_drugs, case_reactions,
            ref_case.get("full_record", {}).get("patient", {}),
            case.get("patient", {})
        )

        if similarity_score >= similarity_threshold:
            similar_cases.append({
                "case": case,
                "similarity_score": similarity_score,
                "similarity_factors": get_similarity_factors(
                    ref_drugs, ref_reactions, case_drugs, case_reactions
                )
            })

    # Sort by similarity score
    similar_cases.sort(key=lambda x: x["similarity_score"], reverse=True)

    return {
        "reference_case_id": reference_case_id,
        "reference_drugs": ref_drugs,
        "reference_reactions": ref_reactions,
        "similar_cases": similar_cases[:limit],
        "total_similar_found": len(similar_cases),
        "similarity_threshold": similarity_threshold,
        "analysis_summary": {
            "most_common_shared_drugs": get_most_common_shared_elements(
                [case["similarity_factors"]["shared_drugs"] for case in similar_cases]
            ),
            "most_common_shared_reactions": get_most_common_shared_elements(
                [case["similarity_factors"]["shared_reactions"] for case in similar_cases]
            )
        }
    }


def calculate_case_similarity(
    ref_drugs: List[str], ref_reactions: List[str],
    case_drugs: List[str], case_reactions: List[str],
    ref_patient: Dict, case_patient: Dict
) -> float:
    """Calculate similarity score between two cases."""

    # Drug similarity (Jaccard index)
    ref_drugs_set = set(ref_drugs)
    case_drugs_set = set(case_drugs)
    drug_intersection = len(ref_drugs_set & case_drugs_set)
    drug_union = len(ref_drugs_set | case_drugs_set)
    drug_similarity = drug_intersection / drug_union if drug_union > 0 else 0

    # Reaction similarity (Jaccard index)
    ref_reactions_set = set(ref_reactions)
    case_reactions_set = set(case_reactions)
    reaction_intersection = len(ref_reactions_set & case_reactions_set)
    reaction_union = len(ref_reactions_set | case_reactions_set)
    reaction_similarity = reaction_intersection / reaction_union if reaction_union > 0 else 0

    # Patient similarity (age and gender)
    patient_similarity = 0
    similarity_factors = 0

    # Age similarity
    ref_age = ref_patient.get("patientage")
    case_age = case_patient.get("age")
    if ref_age and case_age:
        try:
            age_diff = abs(float(ref_age) - float(case_age))
            age_similarity = max(0, 1 - age_diff / 50)  # Normalize by 50 years
            patient_similarity += age_similarity
            similarity_factors += 1
        except (ValueError, TypeError):
            pass

    # Gender similarity
    ref_gender = ref_patient.get("patientsex")
    case_gender = case_patient.get("gender")
    if ref_gender and case_gender and ref_gender == case_gender:
        patient_similarity += 1
        similarity_factors += 1
    elif ref_gender and case_gender:
        similarity_factors += 1

    if similarity_factors > 0:
        patient_similarity /= similarity_factors

    # Weighted overall similarity
    # Drugs and reactions are most important, patient characteristics less so
    overall_similarity = (
        0.5 * drug_similarity +
        0.4 * reaction_similarity +
        0.1 * patient_similarity
    )

    return round(overall_similarity, 3)


def get_similarity_factors(
    ref_drugs: List[str], ref_reactions: List[str],
    case_drugs: List[str], case_reactions: List[str]
) -> Dict[str, List[str]]:
    """Get the specific shared elements between cases."""

    shared_drugs = list(set(ref_drugs) & set(case_drugs))
    shared_reactions = list(set(ref_reactions) & set(case_reactions))

    return {
        "shared_drugs": shared_drugs,
        "shared_reactions": shared_reactions,
        "unique_to_reference_drugs": list(set(ref_drugs) - set(case_drugs)),
        "unique_to_case_drugs": list(set(case_drugs) - set(ref_drugs)),
        "unique_to_reference_reactions": list(set(ref_reactions) - set(case_reactions)),
        "unique_to_case_reactions": list(set(case_reactions) - set(ref_reactions))
    }


def get_most_common_shared_elements(element_lists: List[List[str]]) -> Dict[str, int]:
    """Get the most commonly shared elements across multiple cases."""

    all_elements = []
    for element_list in element_lists:
        all_elements.extend(element_list)

    return dict(Counter(all_elements).most_common(10))


@with_error_handling
@with_caching(ttl=3600)
def temporal_analysis(
    drug_name: str,
    adverse_event: str = None,
    limit: int = 500
) -> Dict[str, Any]:
    """
    Analyze temporal patterns of adverse events for a drug.

    Args:
        drug_name: Drug to analyze
        adverse_event: Specific adverse event (optional)
        limit: Maximum cases to analyze

    Returns:
        Dict with temporal patterns and time-to-onset analysis
    """

    # Get cases with temporal information
    cases_response = enhanced_faers_search(
        drug_name=drug_name,
        adverse_event=adverse_event,
        limit=limit
    )

    cases = cases_response["cases"]

    if not cases:
        return {
            "drug": drug_name,
            "adverse_event": adverse_event,
            "message": "No cases found for temporal analysis"
        }

    # Analyze time to onset patterns
    onset_times = []
    reporting_dates = []

    for case in cases:
        # Extract drug start dates and reaction onset
        for drug in case["drugs"]:
            if drug["name"].lower() == drug_name.lower() and drug["start_date"]:
                try:
                    # Parse date (YYYYMMDD format)
                    start_date = datetime.strptime(drug["start_date"], "%Y%m%d")

                    # For now, we'll use receive date as proxy for reaction onset
                    # In practice, you'd want more sophisticated temporal extraction
                    if case["receive_date"]:
                        receive_date = datetime.strptime(case["receive_date"], "%Y%m%d")
                        onset_time = (receive_date - start_date).days
                        if 0 <= onset_time <= 365:  # Filter reasonable onset times
                            onset_times.append(onset_time)
                            reporting_dates.append(receive_date)
                except (ValueError, TypeError):
                    continue

    # Calculate temporal statistics
    temporal_stats = {}

    if onset_times:
        onset_times.sort()
        temporal_stats["time_to_onset"] = {
            "median_days": onset_times[len(onset_times)//2],
            "mean_days": round(sum(onset_times) / len(onset_times), 1),
            "range_days": [min(onset_times), max(onset_times)],
            "percentiles": {
                "25th": onset_times[len(onset_times)//4],
                "75th": onset_times[3*len(onset_times)//4],
                "90th": onset_times[9*len(onset_times)//10] if len(onset_times) >= 10 else max(onset_times)
            },
            "distribution": categorize_onset_times(onset_times)
        }

    if reporting_dates:
        # Analyze reporting trends over time
        reporting_dates.sort()
        temporal_stats["reporting_trends"] = analyze_reporting_trends(reporting_dates)

    return {
        "drug": drug_name,
        "adverse_event": adverse_event,
        "total_cases_analyzed": len(cases),
        "cases_with_temporal_data": len(onset_times),
        "temporal_analysis": temporal_stats,
        "interpretation": interpret_temporal_patterns(temporal_stats)
    }


def categorize_onset_times(onset_times: List[int]) -> Dict[str, int]:
    """Categorize onset times into clinically relevant periods."""

    categories = {
        "immediate_0_1_day": 0,
        "acute_1_7_days": 0,
        "subacute_1_4_weeks": 0,
        "delayed_1_3_months": 0,
        "late_3_12_months": 0
    }

    for onset in onset_times:
        if onset <= 1:
            categories["immediate_0_1_day"] += 1
        elif onset <= 7:
            categories["acute_1_7_days"] += 1
        elif onset <= 28:
            categories["subacute_1_4_weeks"] += 1
        elif onset <= 90:
            categories["delayed_1_3_months"] += 1
        elif onset <= 365:
            categories["late_3_12_months"] += 1

    return categories


def analyze_reporting_trends(reporting_dates: List[datetime]) -> Dict[str, Any]:
    """Analyze trends in adverse event reporting over time."""

    # Group by year
    year_counts = defaultdict(int)
    for date in reporting_dates:
        year_counts[date.year] += 1

    # Calculate trend
    years = sorted(year_counts.keys())
    if len(years) >= 3:
        recent_avg = sum(year_counts[year] for year in years[-3:]) / 3
        early_avg = sum(year_counts[year] for year in years[:3]) / 3
        trend = "increasing" if recent_avg > early_avg * 1.2 else "decreasing" if recent_avg < early_avg * 0.8 else "stable"
    else:
        trend = "insufficient_data"

    return {
        "yearly_counts": dict(year_counts),
        "date_range": [min(reporting_dates).year, max(reporting_dates).year],
        "trend": trend,
        "peak_year": max(year_counts.keys(), key=lambda k: year_counts[k]) if year_counts else None
    }


def interpret_temporal_patterns(temporal_stats: Dict) -> List[str]:
    """Provide clinical interpretation of temporal patterns."""

    interpretations = []

    if "time_to_onset" in temporal_stats:
        onset_data = temporal_stats["time_to_onset"]
        median_onset = onset_data["median_days"]

        if median_onset <= 1:
            interpretations.append("Immediate onset pattern suggests Type A (dose-dependent) reaction or acute hypersensitivity")
        elif median_onset <= 7:
            interpretations.append("Acute onset pattern typical of many drug allergies and dose-related effects")
        elif median_onset <= 28:
            interpretations.append("Subacute onset may suggest immune-mediated or cumulative toxicity")
        elif median_onset <= 90:
            interpretations.append("Delayed onset pattern may indicate idiosyncratic reactions or chronic toxicity")
        else:
            interpretations.append("Late onset suggests possible chronic effects or delayed hypersensitivity")

        # Check distribution
        distribution = onset_data.get("distribution", {})
        immediate = distribution.get("immediate_0_1_day", 0)
        total_with_onset = sum(distribution.values())

        if total_with_onset > 0:
            immediate_pct = immediate / total_with_onset * 100
            if immediate_pct > 50:
                interpretations.append(f"High proportion ({immediate_pct:.1f}%) of immediate reactions suggests acute mechanism")

    if "reporting_trends" in temporal_stats:
        trend = temporal_stats["reporting_trends"]["trend"]
        if trend == "increasing":
            interpretations.append("Increasing reporting trend may indicate growing awareness or emerging safety signal")
        elif trend == "decreasing":
            interpretations.append("Decreasing reporting trend may suggest improved safety monitoring or reduced use")

    if not interpretations:
        interpretations.append("Insufficient temporal data for meaningful interpretation")

    return interpretations
```
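
As a quick sanity check of the pure `calculate_disproportionality_measures` helper above, a toy 2×2 table (the counts are made up, not real FAERS totals):

```python
from adr_analysis import calculate_disproportionality_measures

# a = drug + event, b = drug + other events, c = other drugs + event, d = neither
measures = calculate_disproportionality_measures(a=30, b=970, c=300, d=500_000)
print(measures["proportional_reporting_ratio"]["value"])  # ≈ 50: the event is heavily over-reported for this drug
print(measures["overall_signal_detected"])                # True for these counts
```
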
app.py
CHANGED

```diff
@@ -1,5 +1,6 @@
 import gradio as gr
 from typing import Dict, Any
+from datetime import datetime
 
 from brand_to_generic import brand_lookup
 from dbi_mcp import dbi_mcp
@@ -12,7 +13,7 @@ from clinical_calculators import (
     dosing_weight_recommendation,
     creatinine_conversion,
 )
-from caching import with_caching
+from caching import with_caching, api_cache
 from utils import with_error_handling, standardize_response, format_json_output
 from drug_data_endpoints import (
     search_adverse_events,
@@ -23,16 +24,15 @@ from drug_data_endpoints import (
     drug_dose_adjustments,
     drug_livertox_summary,
 )
-import
-
-
-
-
-
-    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
-    handlers=[logging.FileHandler("mcp_server.log"), logging.StreamHandler()],
+from adr_analysis import (
+    enhanced_faers_search,
+    calculate_naranjo_score,
+    disproportionality_analysis,
+    find_similar_cases,
+    temporal_analysis,
 )
-
+import time
+import sys
 
 
 @with_error_handling
@@ -408,6 +408,248 @@ def convert_creatinine_units_mcp(value: str, from_unit: str, to_unit: str) -> str:
     return format_json_output(result)
 
 
| 411 |
ae_search_ui = gr.Interface(
|
| 412 |
fn=search_adverse_events_mcp,
|
| 413 |
inputs=[gr.Text(label="Drug Name"), gr.Text(label="Limit", value="5")],
|
|
@@ -637,5 +879,4 @@ demo = gr.TabbedInterface(
|
|
| 637 |
)
|
| 638 |
|
| 639 |
if __name__ == "__main__":
|
| 640 |
-
logger.info("Starting Pharmacist MCP Server v1.1.0")
|
| 641 |
demo.launch(mcp_server=True, show_error=True)
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from typing import Dict, Any
|
| 3 |
+
from datetime import datetime
|
| 4 |
|
| 5 |
from brand_to_generic import brand_lookup
|
| 6 |
from dbi_mcp import dbi_mcp
|
|
|
|
| 13 |
dosing_weight_recommendation,
|
| 14 |
creatinine_conversion,
|
| 15 |
)
|
| 16 |
+
from caching import with_caching, api_cache
|
| 17 |
from utils import with_error_handling, standardize_response, format_json_output
|
| 18 |
from drug_data_endpoints import (
|
| 19 |
search_adverse_events,
|
|
|
|
| 24 |
drug_dose_adjustments,
|
| 25 |
drug_livertox_summary,
|
| 26 |
)
|
| 27 |
+
from adr_analysis import (
|
| 28 |
+
enhanced_faers_search,
|
| 29 |
+
calculate_naranjo_score,
|
| 30 |
+
disproportionality_analysis,
|
| 31 |
+
find_similar_cases,
|
| 32 |
+
temporal_analysis,
|
|
|
|
|
|
|
| 33 |
)
|
| 34 |
+
import time
|
| 35 |
+
import sys
|
| 36 |
|
| 37 |
|
| 38 |
@with_error_handling
|
|
|
|
| 408 |
return format_json_output(result)
|
| 409 |
|
| 410 |
|
| 411 |
+
@with_error_handling
|
| 412 |
+
def get_cache_stats_mcp() -> str:
|
| 413 |
+
"""
|
| 414 |
+
Get cache statistics for monitoring and debugging.
|
| 415 |
+
|
| 416 |
+
Returns:
|
| 417 |
+
str: JSON string with cache hit rates, size, and other metrics
|
| 418 |
+
"""
|
| 419 |
+
stats = api_cache.get_stats()
|
| 420 |
+
expired_cleared = api_cache.clear_expired()
|
| 421 |
+
|
| 422 |
+
result = {
|
| 423 |
+
**stats,
|
| 424 |
+
"expired_entries_cleared": expired_cleared,
|
| 425 |
+
"cache_health": "good" if stats.get("hit_rate", 0) > 0.3 else "poor"
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
return format_json_output(standardize_response(result, "cache_stats"))
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
@with_error_handling
|
| 432 |
+
def health_check_mcp() -> str:
|
| 433 |
+
"""
|
| 434 |
+
Health check endpoint for monitoring MCP server status.
|
| 435 |
+
|
| 436 |
+
Returns:
|
| 437 |
+
str: JSON string with server health information
|
| 438 |
+
"""
|
| 439 |
+
# Test basic functionality
|
| 440 |
+
try:
|
| 441 |
+
# Test cache
|
| 442 |
+
cache_stats = api_cache.get_stats()
|
| 443 |
+
|
| 444 |
+
# Test a simple calculation
|
| 445 |
+
test_calc = cockcroft_gault_creatinine_clearance(65, 70, 1.2, False)
|
| 446 |
+
calc_working = test_calc.get("creatinine_clearance_ml_min") is not None
|
| 447 |
+
|
| 448 |
+
# Check if reference data is loaded
|
| 449 |
+
from pathlib import Path
|
| 450 |
+
ref_file_exists = Path("dbi_reference_by_route.csv").exists()
|
| 451 |
+
|
| 452 |
+
health_status = {
|
| 453 |
+
"status": "healthy",
|
| 454 |
+
"timestamp": datetime.now().isoformat(),
|
| 455 |
+
"uptime_info": {
|
| 456 |
+
"python_version": sys.version.split()[0],
|
| 457 |
+
"cache_working": cache_stats is not None,
|
| 458 |
+
"calculations_working": calc_working,
|
| 459 |
+
"reference_data_available": ref_file_exists
|
| 460 |
+
},
|
| 461 |
+
"cache_stats": cache_stats,
|
| 462 |
+
"version": "1.1.0"
|
| 463 |
+
}
|
| 464 |
+
|
| 465 |
+
# Determine overall health
|
| 466 |
+
if not calc_working or not ref_file_exists:
|
| 467 |
+
health_status["status"] = "degraded"
|
| 468 |
+
|
| 469 |
+
except Exception as e:
|
| 470 |
+
health_status = {
|
| 471 |
+
"status": "unhealthy",
|
| 472 |
+
"timestamp": datetime.now().isoformat(),
|
| 473 |
+
"error": str(e),
|
| 474 |
+
"version": "1.1.0"
|
| 475 |
+
}
|
| 476 |
+
|
| 477 |
+
return format_json_output(standardize_response(health_status, "health_check"))
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
# ===== NEW ADR ANALYSIS ENDPOINTS =====
|
| 481 |
+
|
| 482 |
+
@with_error_handling
|
| 483 |
+
def enhanced_faers_search_mcp(
|
| 484 |
+
drug_name: str,
|
| 485 |
+
adverse_event: str = "",
|
| 486 |
+
age_range: str = "",
|
| 487 |
+
gender: str = "",
|
| 488 |
+
serious_only: str = "false",
|
| 489 |
+
limit: str = "100"
|
| 490 |
+
) -> str:
|
| 491 |
+
"""
|
| 492 |
+
Enhanced FAERS search with filtering capabilities for pharmacovigilance analysis.
|
| 493 |
+
|
| 494 |
+
Args:
|
| 495 |
+
drug_name (str): Drug name to search for
|
| 496 |
+
adverse_event (str): Specific adverse event/reaction to filter by (optional)
|
| 497 |
+
age_range (str): Age range filter like "18-65" or ">65" (optional)
|
| 498 |
+
gender (str): Gender filter "1" (male) or "2" (female) (optional)
|
| 499 |
+
serious_only (str): "true" to only return serious adverse events
|
| 500 |
+
limit (str): Maximum number of results (default "100")
|
| 501 |
+
|
| 502 |
+
Returns:
|
| 503 |
+
str: JSON string with enhanced case data including demographics and outcomes
|
| 504 |
+
"""
|
| 505 |
+
limit_int = int(limit) if limit.isdigit() else 100
|
| 506 |
+
serious_bool = serious_only.lower() == "true"
|
| 507 |
+
|
| 508 |
+
# Convert empty strings to None
|
| 509 |
+
adverse_event = adverse_event if adverse_event.strip() else None
|
| 510 |
+
age_range = age_range if age_range.strip() else None
|
| 511 |
+
gender = gender if gender.strip() in ["1", "2"] else None
|
| 512 |
+
|
| 513 |
+
result = enhanced_faers_search(
|
| 514 |
+
drug_name=drug_name,
|
| 515 |
+
adverse_event=adverse_event,
|
| 516 |
+
age_range=age_range,
|
| 517 |
+
gender=gender,
|
| 518 |
+
serious_only=serious_bool,
|
| 519 |
+
limit=limit_int
|
| 520 |
+
)
|
| 521 |
+
|
| 522 |
+
return format_json_output(standardize_response(result, "enhanced_faers_search"))
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
@with_error_handling
|
| 526 |
+
def calculate_naranjo_score_mcp(
|
| 527 |
+
adverse_reaction_after_drug: str,
|
| 528 |
+
reaction_improved_after_stopping: str,
|
| 529 |
+
reaction_reappeared_after_readministration: str,
|
| 530 |
+
alternative_causes_exist: str,
|
| 531 |
+
reaction_when_placebo_given: str,
|
| 532 |
+
drug_detected_in_blood: str,
|
| 533 |
+
reaction_worse_with_higher_dose: str,
|
| 534 |
+
similar_reaction_to_drug_before: str,
|
| 535 |
+
adverse_event_confirmed_objectively: str,
|
| 536 |
+
reaction_appeared_after_suspected_drug_given: str
|
| 537 |
+
) -> str:
|
| 538 |
+
"""
|
| 539 |
+
Calculate Naranjo Adverse Drug Reaction Probability Scale.
|
| 540 |
+
|
| 541 |
+
Args:
|
| 542 |
+
All parameters should be "yes", "no", or "unknown"
|
| 543 |
+
|
| 544 |
+
Returns:
|
| 545 |
+
str: JSON string with score, probability category, and detailed breakdown
|
| 546 |
+
"""
|
| 547 |
+
result = calculate_naranjo_score(
|
| 548 |
+
adverse_reaction_after_drug=adverse_reaction_after_drug,
|
| 549 |
+
reaction_improved_after_stopping=reaction_improved_after_stopping,
|
| 550 |
+
reaction_reappeared_after_readministration=reaction_reappeared_after_readministration,
|
| 551 |
+
alternative_causes_exist=alternative_causes_exist,
|
| 552 |
+
reaction_when_placebo_given=reaction_when_placebo_given,
|
| 553 |
+
drug_detected_in_blood=drug_detected_in_blood,
|
| 554 |
+
reaction_worse_with_higher_dose=reaction_worse_with_higher_dose,
|
| 555 |
+
similar_reaction_to_drug_before=similar_reaction_to_drug_before,
|
| 556 |
+
adverse_event_confirmed_objectively=adverse_event_confirmed_objectively,
|
| 557 |
+
reaction_appeared_after_suspected_drug_given=reaction_appeared_after_suspected_drug_given
|
| 558 |
+
)
|
| 559 |
+
|
| 560 |
+
return format_json_output(standardize_response(result, "naranjo_score"))
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
@with_error_handling
|
| 564 |
+
def disproportionality_analysis_mcp(
|
| 565 |
+
drug_name: str,
|
| 566 |
+
adverse_event: str,
|
| 567 |
+
background_limit: str = "10000"
|
| 568 |
+
) -> str:
|
| 569 |
+
"""
|
| 570 |
+
Perform disproportionality analysis to detect potential drug-adverse event signals.
|
| 571 |
+
|
| 572 |
+
Args:
|
| 573 |
+
drug_name (str): Drug of interest
|
| 574 |
+
adverse_event (str): Adverse event of interest
|
| 575 |
+
background_limit (str): Number of background cases to sample (default "10000")
|
| 576 |
+
|
| 577 |
+
Returns:
|
| 578 |
+
str: JSON string with PRR, ROR, IC values and statistical significance
|
| 579 |
+
"""
|
| 580 |
+
background_limit_int = int(background_limit) if background_limit.isdigit() else 10000
|
| 581 |
+
|
| 582 |
+
result = disproportionality_analysis(
|
| 583 |
+
drug_name=drug_name,
|
| 584 |
+
adverse_event=adverse_event,
|
| 585 |
+
background_limit=background_limit_int
|
| 586 |
+
)
|
| 587 |
+
|
| 588 |
+
return format_json_output(standardize_response(result, "disproportionality_analysis"))
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
@with_error_handling
|
| 592 |
+
def find_similar_cases_mcp(
|
| 593 |
+
reference_case_id: str,
|
| 594 |
+
similarity_threshold: str = "0.7",
|
| 595 |
+
limit: str = "50"
|
| 596 |
+
) -> str:
|
| 597 |
+
"""
|
| 598 |
+
Find cases similar to a reference case based on patient characteristics, drugs, and adverse events.
|
| 599 |
+
|
| 600 |
+
Args:
|
| 601 |
+
reference_case_id (str): FAERS safety report ID to use as reference
|
| 602 |
+
similarity_threshold (str): Minimum similarity score 0-1 (default "0.7")
|
| 603 |
+
limit (str): Maximum number of similar cases to return (default "50")
|
| 604 |
+
|
| 605 |
+
Returns:
|
| 606 |
+
str: JSON string with similar cases and similarity scores
|
| 607 |
+
"""
|
| 608 |
+
try:
|
| 609 |
+
similarity_threshold_float = float(similarity_threshold)
|
| 610 |
+
except ValueError:
|
| 611 |
+
similarity_threshold_float = 0.7
|
| 612 |
+
|
| 613 |
+
limit_int = int(limit) if limit.isdigit() else 50
|
| 614 |
+
|
| 615 |
+
result = find_similar_cases(
|
| 616 |
+
reference_case_id=reference_case_id,
|
| 617 |
+
similarity_threshold=similarity_threshold_float,
|
| 618 |
+
limit=limit_int
|
| 619 |
+
)
|
| 620 |
+
|
| 621 |
+
return format_json_output(standardize_response(result, "similar_cases"))
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
@with_error_handling
|
| 625 |
+
def temporal_analysis_mcp(
|
| 626 |
+
drug_name: str,
|
| 627 |
+
adverse_event: str = "",
|
| 628 |
+
limit: str = "500"
|
| 629 |
+
) -> str:
|
| 630 |
+
"""
|
| 631 |
+
Analyze temporal patterns of adverse events for a drug.
|
| 632 |
+
|
| 633 |
+
Args:
|
| 634 |
+
drug_name (str): Drug to analyze
|
| 635 |
+
adverse_event (str): Specific adverse event (optional)
|
| 636 |
+
limit (str): Maximum cases to analyze (default "500")
|
| 637 |
+
|
| 638 |
+
Returns:
|
| 639 |
+
str: JSON string with temporal patterns and time-to-onset analysis
|
| 640 |
+
"""
|
| 641 |
+
adverse_event = adverse_event if adverse_event.strip() else None
|
| 642 |
+
limit_int = int(limit) if limit.isdigit() else 500
|
| 643 |
+
|
| 644 |
+
result = temporal_analysis(
|
| 645 |
+
drug_name=drug_name,
|
| 646 |
+
adverse_event=adverse_event,
|
| 647 |
+
limit=limit_int
|
| 648 |
+
)
|
| 649 |
+
|
| 650 |
+
return format_json_output(standardize_response(result, "temporal_analysis"))
|
| 651 |
+
|
| 652 |
+
|
| 653 |
ae_search_ui = gr.Interface(
|
| 654 |
fn=search_adverse_events_mcp,
|
| 655 |
inputs=[gr.Text(label="Drug Name"), gr.Text(label="Limit", value="5")],
|
|
|
|
| 879 |
)
|
| 880 |
|
| 881 |
if __name__ == "__main__":
|
|
|
|
| 882 |
demo.launch(mcp_server=True, show_error=True)
|
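All of the new *_mcp wrappers follow the same pattern: accept strings from the Gradio/MCP layer, coerce them to typed values with safe defaults, call the matching adr_analysis function, then wrap the result with standardize_response and format_json_output. A small sketch of just the coercion step; the function and variable names below are illustrative, not taken from the commit:

def parse_mcp_args(limit: str = "100", serious_only: str = "false", adverse_event: str = ""):
    """Convert the string inputs that Gradio/MCP pass into typed values with safe defaults."""
    limit_int = int(limit) if limit.isdigit() else 100        # non-numeric input falls back to 100
    serious_bool = serious_only.lower() == "true"             # "true"/"false" string -> bool
    adverse_event_opt = adverse_event.strip() or None         # empty string -> None
    return limit_int, serious_bool, adverse_event_opt

print(parse_mcp_args("25", "TRUE", "  rash "))  # (25, True, 'rash')
print(parse_mcp_args("abc", "no", ""))          # (100, False, None)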
brand_to_generic.py
CHANGED

@@ -14,6 +14,10 @@ logger = logging.getLogger(__name__)

 _session = requests.Session()

+# Reduce timeouts for better performance
+DEFAULT_TIMEOUT = 5  # Reduced from 10
+FAST_TIMEOUT = 3  # For quick checks
+

 class _Throttle:
     """Simple host-level throttle (~1 rps)."""

@@ -359,6 +363,19 @@ def brand_lookup(
     return {"brand_searched": brand, "results": []}


+def make_api_request_with_timeout(url: str, params: dict, timeout: int = DEFAULT_TIMEOUT):
+    """Make API request with configurable timeout and better error handling."""
+    try:
+        response = requests.get(url, params=params, timeout=timeout)
+        return response
+    except requests.exceptions.Timeout:
+        logger.warning(f"Timeout after {timeout}s for {url}")
+        return None
+    except requests.exceptions.RequestException as e:
+        logger.warning(f"Request failed for {url}: {e}")
+        return None
+
+
 if __name__ == "__main__":
     import sys, pprint

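Because make_api_request_with_timeout swallows timeouts and request errors and returns None, callers have to branch on None before touching the response. A brief sketch under that assumption; the openFDA NDC endpoint and query below are illustrative examples, not taken from this commit:

from brand_to_generic import make_api_request_with_timeout, FAST_TIMEOUT

resp = make_api_request_with_timeout(
    "https://api.fda.gov/drug/ndc.json",
    {"search": 'brand_name:"Tylenol"', "limit": 1},
    timeout=FAST_TIMEOUT,
)
if resp is None or resp.status_code != 200:
    print("Lookup failed or timed out")  # the helper already logged the reason and returned None
else:
    print(len(resp.json().get("results", [])), "result(s)")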
caching.py
CHANGED

@@ -11,6 +11,7 @@ class SimpleCache:
     def __init__(self, default_ttl: int = 3600):
         self.cache: Dict[str, Dict[str, Any]] = {}
         self.default_ttl = default_ttl
+        self.stats = {"hits": 0, "misses": 0, "expired": 0}

     def _is_expired(self, entry: Dict[str, Any]) -> bool:
         return datetime.now() > entry['expires']

@@ -19,11 +20,15 @@ class SimpleCache:
         if key in self.cache:
             entry = self.cache[key]
             if not self._is_expired(entry):
-
+                self.stats["hits"] += 1
+                logger.debug(f"Cache hit for key: {key[:20]}...")
                 return entry['data']
             else:
                 del self.cache[key]
-
+                self.stats["expired"] += 1
+                logger.debug(f"Cache expired for key: {key[:20]}...")
+
+        self.stats["misses"] += 1
         return None

     def set(self, key: str, data: Any, ttl: Optional[int] = None) -> None:

@@ -32,7 +37,27 @@ class SimpleCache:
             'data': data,
             'expires': datetime.now() + timedelta(seconds=ttl)
         }
-        logger.debug(f"Cached data for key: {key}")
+        logger.debug(f"Cached data for key: {key[:20]}... (TTL: {ttl}s)")
+
+    def get_stats(self) -> Dict[str, Any]:
+        """Get cache statistics for monitoring."""
+        total_requests = sum(self.stats.values())
+        hit_rate = self.stats["hits"] / total_requests if total_requests > 0 else 0
+        return {
+            **self.stats,
+            "hit_rate": round(hit_rate, 3),
+            "cache_size": len(self.cache)
+        }
+
+    def clear_expired(self) -> int:
+        """Manually clear expired entries and return count cleared."""
+        expired_keys = [
+            key for key, entry in self.cache.items()
+            if self._is_expired(entry)
+        ]
+        for key in expired_keys:
+            del self.cache[key]
+        return len(expired_keys)

 api_cache = SimpleCache()

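A quick sketch of the new instrumentation in use; the key and payload are arbitrary, and hit_rate is hits divided by all recorded lookups (hits, misses, and expired entries):

from caching import api_cache

api_cache.set("demo-key", {"answer": 42}, ttl=60)
api_cache.get("demo-key")      # counted as a hit
api_cache.get("missing-key")   # counted as a miss

print(api_cache.get_stats())
# e.g. {'hits': 1, 'misses': 1, 'expired': 0, 'hit_rate': 0.5, 'cache_size': 1}
print(api_cache.clear_expired(), "expired entries removed")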
clinical_calculators.py
CHANGED

@@ -26,8 +26,19 @@ def cockcroft_gault_creatinine_clearance(
     Returns:
         Dict with calculated creatinine clearance and interpretation
     """
-
-
+    # Enhanced input validation with specific error messages
+    if age <= 0:
+        raise ValueError(f"Age must be positive, got {age}")
+    if age > 120:
+        raise ValueError(f"Age seems unrealistic, got {age}")
+    if weight_kg <= 0:
+        raise ValueError(f"Weight must be positive, got {weight_kg}")
+    if weight_kg > 500:
+        raise ValueError(f"Weight seems unrealistic, got {weight_kg}")
+    if serum_creatinine_mg_dl <= 0:
+        raise ValueError(f"Serum creatinine must be positive, got {serum_creatinine_mg_dl}")
+    if serum_creatinine_mg_dl > 20:
+        raise ValueError(f"Serum creatinine seems unrealistic, got {serum_creatinine_mg_dl}")

     clearance = ((140 - age) * weight_kg) / (72 * serum_creatinine_mg_dl)

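With the stricter checks, out-of-range inputs now fail fast with a specific ValueError rather than producing a misleading clearance. A short sketch of how that surfaces; the fourth positional argument mirrors the health-check call in app.py, since its parameter name is not shown in this diff:

from clinical_calculators import cockcroft_gault_creatinine_clearance

try:
    cockcroft_gault_creatinine_clearance(-5, 70, 1.2, False)
except ValueError as exc:
    print(exc)  # Age must be positive, got -5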
drug_data_endpoints.py
CHANGED

@@ -33,15 +33,27 @@ def search_adverse_events(drug_name: str, limit: int = 5):
     Dict with a ``contexts`` key - list of objects ``{id, text}`` suitable
     for an LLM to inject as context.
     """
+    # Input validation
+    if not drug_name or not drug_name.strip():
+        raise ValueError("Drug name cannot be empty")
+
     base_url = "https://api.fda.gov/drug/event.json"
     query_params = {
-        "search": f'patient.drug.medicinalproduct:"{drug_name}"',
-        "limit": min(limit, 100)
+        "search": f'patient.drug.medicinalproduct:"{drug_name.strip()}"',
+        "limit": min(max(1, limit), 100)  # Ensure limit is between 1 and 100
     }

-    response = make_api_request(base_url, query_params)
+    response = make_api_request(base_url, query_params, timeout=10)

     if response.status_code != 200:
+        if response.status_code == 404:
+            # Return empty results instead of error for not found
+            return {
+                "contexts": [],
+                "total_found": 0,
+                "query": drug_name,
+                "message": "No adverse events found for this drug"
+            }
         raise requests.exceptions.RequestException(f"FAERS search failed: {response.status_code}")

     data = response.json()

@@ -210,7 +222,7 @@ REPRODUCTIVE_POTENTIAL_PAT = re.compile(r"(?:8\.3\s*(?:Females\s+and\s+Males\s+o
 @with_caching(ttl=7200)
 def drug_pregnancy_lactation(drug_name: str):
     """
-    Return Pregnancy & Lactation text from FDA label.
+    Return Pregnancy & Lactation text from FDA label with improved search and fallback data.

     Args:
         drug_name: Generic name preferred.

@@ -218,43 +230,76 @@ def drug_pregnancy_lactation(drug_name: str):
     Returns:
         Dict with pregnancy_text, pregnancy_registry, lactation_text, and reproductive_potential_text.
     """
-
-
-    "
-    "limit": 1
-    }
+    # Input validation
+    if not drug_name or not drug_name.strip():
+        raise ValueError("Drug name cannot be empty")

-
+    drug_name = drug_name.strip()

-
-
+    # Try multiple search strategies
+    search_strategies = [
+        f'openfda.generic_name:"{drug_name}"',
+        f'openfda.brand_name:"{drug_name}"',
+        f'openfda.substance_name:"{drug_name}"',
+        f'generic_name:"{drug_name}"',
+        f'brand_name:"{drug_name}"'
+    ]

-
-    if not data.get("results"):
-        raise ValueError("Label not found")
-
-    lab = data["results"][0]
+    base_url = "https://api.fda.gov/drug/label.json"

-
-
-
-
-
-
-
-
-
-
-
-
+    for search_query in search_strategies:
+        try:
+            query_params = {
+                "search": search_query,
+                "limit": 1
+            }
+
+            response = make_api_request(base_url, query_params, timeout=8)
+
+            if response and response.status_code == 200:
+                data = response.json()
+                if data.get("results"):
+                    lab = data["results"][0]
+
+                    # Extract pregnancy/lactation data
+                    use_in_specific_populations_text = "\n".join(lab.get("use_in_specific_populations", []))
+
+                    lactation_match = LACTATION_PAT.search(use_in_specific_populations_text)
+                    lactation_text = lactation_match.group(1).strip() if lactation_match else lab.get("lactation", [""])[0]
+                    if not lactation_text and lactation_match:
+                        lactation_text = lactation_match.group(1).strip()
+
+                    reproductive_potential_match = REPRODUCTIVE_POTENTIAL_PAT.search(use_in_specific_populations_text)
+                    reproductive_potential_text = reproductive_potential_match.group(1).strip() if reproductive_potential_match else ""
+
+                    pregnancy_text = lab.get("pregnancy", [""])[0]
+                    pregnancy_registry = lab.get("pregnancy_exposure_registry", [""])[0]
+
+                    # If we found meaningful data, return it
+                    if pregnancy_text or lactation_text or reproductive_potential_text:
+                        return {
+                            "pregnancy_text": pregnancy_text or "Not found or not specified in the label.",
+                            "pregnancy_registry": pregnancy_registry or "Not specified.",
+                            "lactation_text": lactation_text or "Not found or not specified in the label.",
+                            "reproductive_potential_text": reproductive_potential_text or "Not found or not specified in the label.",
+                            "drug_name": drug_name,
+                            "data_source": f"FDA Label (search: {search_query})"
+                        }
+        except Exception as e:
+            continue
+
+    # If FDA search fails, return not found message
     return {
-        "pregnancy_text":
-        "pregnancy_registry":
-        "lactation_text":
-        "reproductive_potential_text":
-        "drug_name": drug_name
+        "pregnancy_text": "FDA label data not available for this drug.",
+        "pregnancy_registry": "Not specified.",
+        "lactation_text": "FDA label data not available for this drug.",
+        "reproductive_potential_text": "FDA label data not available for this drug.",
+        "drug_name": drug_name,
+        "data_source": "FDA Label (not found)"
     }

+
+
 RENAL_PAT = re.compile(r"\brenal\b.*?\b(impairment|dysfunction|failure)\b", re.I | re.S)
 HEP_PAT = re.compile(r"\bhepatic\b.*?\b(impairment|dysfunction|child(?:--|\s|-)?pugh)\b", re.I | re.S)

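The reworked drug_pregnancy_lactation walks a list of openFDA label queries (generic name, brand name, substance name, and so on) and stops at the first one that returns results, falling back to a "not available" payload. The same cascade can be sketched in isolation; this standalone version uses requests directly and a hypothetical helper name rather than the module's own wrapper:

import requests

def first_label_hit(drug_name: str):
    """Try several openFDA label queries and return the first match, if any."""
    base_url = "https://api.fda.gov/drug/label.json"
    strategies = [
        f'openfda.generic_name:"{drug_name}"',
        f'openfda.brand_name:"{drug_name}"',
        f'openfda.substance_name:"{drug_name}"',
    ]
    for search in strategies:
        try:
            r = requests.get(base_url, params={"search": search, "limit": 1}, timeout=8)
            if r.status_code == 200 and r.json().get("results"):
                return search, r.json()["results"][0]
        except requests.RequestException:
            continue  # try the next, broader query
    return None, None

strategy, label = first_label_hit("metformin")
print("matched on:", strategy)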
requirements.txt
CHANGED

@@ -1,4 +1,4 @@
-gradio
+gradio[mcp]
 requests
 datasets
 beautifulsoup4
utils.py
CHANGED

@@ -48,6 +48,10 @@ def with_error_handling(func):

 def standardize_response(data: Dict[str, Any], source: str) -> Dict[str, Any]:
     """Standardize API response format with metadata."""
+    # Check if data is already a standardized response to avoid double nesting
+    if isinstance(data, dict) and "data" in data and "metadata" in data and "status" in data:
+        return data
+
     return {
         "data": data,
         "metadata": {

@@ -59,9 +63,9 @@ def standardize_response(data: Dict[str, Any], source: str) -> Dict[str, Any]:
         "status": "success"
     }

-def create_error_response(error_msg: str, source: str) -> Dict[str, Any]:
-    """Create standardized error response."""
-    return {
+def create_error_response(error_msg: str, source: str, error_code: str = None) -> Dict[str, Any]:
+    """Create standardized error response with optional error code."""
+    response = {
         "data": None,
         "metadata": {
             "source": source,

@@ -71,6 +75,11 @@ def create_error_response(error_msg: str, source: str) -> Dict[str, Any]:
         "status": "error",
         "error": error_msg
     }
+
+    if error_code:
+        response["error_code"] = error_code
+
+    return response

 def make_api_request(url: str, params: Dict[str, Any], timeout: int = 15, max_retries: int = 3) -> requests.Response:
     """Make API request with retry logic and rate limiting."""
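The double-nesting guard matters because several endpoints already pass a standardized payload back through standardize_response. A minimal sketch of the behaviour it is meant to guarantee; this assumes utils.py from this repo is importable and that "status" is a top-level key of the standardized dict, as the guard implies:

from utils import standardize_response

inner = standardize_response({"value": 1}, "demo_source")
outer = standardize_response(inner, "demo_source")
print(outer is inner)  # expected True: an already-wrapped payload is returned unchanged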