hamnamughal committed on
Commit
edc7c18
·
verified ·
1 Parent(s): da32aec

Update health_tracker_agent.py

Browse files
Files changed (1) hide show
  1. health_tracker_agent.py +11 -38
health_tracker_agent.py CHANGED
@@ -1,38 +1,11 @@
1
- import pandas as pd
2
- import openai
3
-
4
# In-memory store of logged health readings, keyed by metric name.
# Each value is the chronological list of raw string readings.
health_data = {
    "blood_pressure": [],
    "weight": [],
    "heart_rate": [],
}


def log_symptom(symptom: str, value: str):
    """Record one reading for *symptom*, creating its series if unseen.

    Returns a human-readable confirmation message.
    """
    # setdefault creates the list on first sight of a new symptom key.
    health_data.setdefault(symptom, []).append(value)
    return f"Symptom {symptom} with value {value} logged successfully."
15
-
16
def get_health_report():
    """Return a demo health-trend report as a pandas DataFrame.

    The data is hard-coded sample readings (three dates of blood
    pressure and heart rate), not pulled from ``health_data``.
    """
    dates = ["2023-04-01", "2023-04-05", "2023-04-10"]
    blood_pressure = ["130/85", "135/88", "140/90"]
    heart_rate = [72, 75, 78]
    return pd.DataFrame(
        {
            "Date": dates,
            "Blood Pressure": blood_pressure,
            "Heart Rate": heart_rate,
        }
    )
23
-
24
def track_health_stat(stat_type: str):
    """Return the logged readings for *stat_type*.

    Falls back to the sentinel string "Stat type not found." when no
    such metric has ever been logged in ``health_data``.
    """
    try:
        return health_data[stat_type]
    except KeyError:
        return "Stat type not found."
28
-
29
def personalized_health_advice(symptom: str, value: str = ""):
    """Ask the OpenAI chat API for advice tailored to *symptom*/*value*.

    Sends a single user message to gpt-3.5-turbo and returns the
    stripped text of the first completion choice.
    NOTE(review): uses the legacy ``openai.ChatCompletion`` interface —
    presumably the installed SDK is pre-1.0; confirm before upgrading.
    """
    user_message = {
        "role": "user",
        "content": f"Provide personalized health advice for someone experiencing {symptom} with severity: {value}.",
    }
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[user_message],
        max_tokens=150,
    )
    return completion['choices'][0]['message']['content'].strip()
 
1
+ from transformers import AutoTokenizer, AutoModelForCausalLM
2
+ import torch
3
+
4
# Lazily-populated cache so the (very large, ~70B-parameter) Med42 model
# and its tokenizer are loaded only once per process, not on every call.
_med42_cache = {}


def track_health_status(health_input):
    """Generate health-tracking advice for *health_input* via the Med42 LLM.

    Fix over the original: the original re-ran ``from_pretrained`` for both
    the tokenizer and the 70B model on EVERY invocation, paying the full
    download/load cost each time.  Here they are loaded on first use and
    reused from ``_med42_cache`` afterwards.

    Returns the decoded generation as a string (the prompt text is
    included, since the full output sequence is decoded).
    """
    if "model" not in _med42_cache:
        model_name = "m42-health/med42-70b"
        _med42_cache["tokenizer"] = AutoTokenizer.from_pretrained(model_name)
        _med42_cache["model"] = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = _med42_cache["tokenizer"]
    model = _med42_cache["model"]

    prompt = f"Based on the following health input, track possible issues or advice: {health_input}."
    inputs = tokenizer(prompt, return_tensors="pt")
    # Default greedy decoding; cap new tokens to bound generation latency.
    outputs = model.generate(**inputs, max_new_tokens=150)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)