import os
import json
import streamlit as st
import faiss
import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from sentence_transformers import SentenceTransformer
from reportlab.lib.pagesizes import A4
from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer
from reportlab.lib.styles import getSampleStyleSheet
# Load milestones data
with open('milestones.json', 'r') as f:
    milestones = json.load(f)
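# Assumed structure of milestones.json (inferred from the lookups below): keys are ages in
# months stored as strings, each mapping to a list of entries with at least a 'description'
# field, e.g. {"2": [{"description": "Smiles at people"}], "4": [...], ...}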
# Age categories for dropdown selection
age_categories = {
    "Up to 2 months": 2, "Up to 4 months": 4, "Up to 6 months": 6,
    "Up to 9 months": 9, "Up to 1 year": 12, "Up to 15 months": 15,
    "Up to 18 months": 18, "Up to 2 years": 24, "Up to 30 months": 30,
    "Up to 3 years": 36, "Up to 4 years": 48, "Up to 5 years": 60
}
# Initialize FAISS and Sentence Transformer
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
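# all-MiniLM-L6-v2 produces 384-dimensional sentence embeddings, so the index built below
# is a 384-dim exact (brute-force) L2 index, which is fine for a small milestone set.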
def create_faiss_index(data):
    descriptions, age_keys = [], []
    for age, categories in data.items():
        for entry in categories:
            descriptions.append(entry['description'])
            age_keys.append(int(age))  # Convert the age key (months) to an int
    embeddings = embedding_model.encode(descriptions, convert_to_numpy=True)
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(embeddings)
    return index, descriptions, age_keys
index, descriptions, age_keys = create_faiss_index(milestones)
# Function to retrieve the closest milestone
def retrieve_milestone(user_input):
    user_embedding = embedding_model.encode([user_input], convert_to_numpy=True)
    _, indices = index.search(user_embedding, 1)
    return descriptions[indices[0][0]] if indices[0][0] < len(descriptions) else "No relevant milestone found."
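# Example: retrieve_milestone("says simple words like mama and dada") returns the stored
# milestone description whose embedding has the smallest L2 distance to the input text.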
# Load IBM Granite 3.1 model and tokenizer
model_name = "ibm-granite/granite-3.1-8b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Granite 3.1 is a decoder-only model, so it must be loaded as a causal LM, not seq2seq
granite_model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, device_map="auto"
)
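# Note on the assumed deployment target: the 8B checkpoint in float16 needs roughly 16 GB
# of GPU memory, and device_map="auto" requires the accelerate package to be installed.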
def generate_response(user_input, child_age):
    relevant_milestone = retrieve_milestone(user_input)
    prompt = (
        f"The child is {child_age} months old. Based on the given traits: {user_input}, "
        f"determine whether the child is meeting expected milestones. "
        f"Relevant milestone: {relevant_milestone}. "
        "If there are any concerns, suggest steps the parents can take."
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(granite_model.device)
    output = granite_model.generate(**inputs, max_new_tokens=512)
    # A causal LM echoes the prompt, so decode only the newly generated tokens
    return tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
# Streamlit UI Styling
st.set_page_config(page_title="Tiny Triumphs Tracker", page_icon="👶", layout="wide")
st.markdown("""
<style>
.stApp { background-color: #1e1e2e; color: #ffffff; }
.stTitle { text-align: center; color: #ffcc00; font-size: 36px; font-weight: bold; }
.stButton > button { background-color: #ffcc00; color: #000; border-radius: 5px; font-weight: bold; }
.stSelectbox, .stTextArea { background-color: #2e2e42; color: #ffffff; border-radius: 5px; }
</style>
""", unsafe_allow_html=True)
st.markdown("<h1 class='stTitle'>👶 Tiny Triumphs Tracker</h1>", unsafe_allow_html=True)
st.markdown("Track your child's key growth milestones from birth to 5 years and detect early developmental concerns.", unsafe_allow_html=True)
# User selects child's age
selected_age = st.selectbox("📅 Select child's age:", list(age_categories.keys()))
child_age = age_categories[selected_age]
# User input for traits and skills
placeholder_text = "For example, your child might say simple words like 'mama' and 'dada' and smile when spoken to. They may grasp small objects with their fingers and show excitement during playtime."
user_input = st.text_area("✍️ Enter child's behavioral traits and skills:", placeholder=placeholder_text)
def generate_pdf_report(ai_response):
    pdf_file = "progress_report.pdf"
    doc = SimpleDocTemplate(pdf_file, pagesize=A4)
    styles = getSampleStyleSheet()
    elements = [
        Paragraph("Child Development Progress Report", styles['Title']),
        Spacer(1, 12),
        Paragraph("Development Insights:", styles['Heading2']),
        Spacer(1, 10)
    ]
    # Turn each non-empty line of the model output into a bullet, stripping any
    # leading list numbering or dashes
    for part in ai_response.split('\n'):
        part = part.strip().lstrip('0123456789.- ')
        if part:
            elements.append(Paragraph(f"• {part}", styles['Normal']))
            elements.append(Spacer(1, 5))
    disclaimer = ("This report is AI-generated and is for informational purposes only. "
                  "It should not be considered a substitute for professional medical advice. "
                  "Always consult a qualified pediatrician for expert guidance on your child's development.")
    elements.append(Spacer(1, 12))
    elements.append(Paragraph(disclaimer, styles['Italic']))
    doc.build(elements)
    return pdf_file
if st.button("🔍 Analyze", help="Click to analyze the child's development milestones"):
    ai_response = generate_response(user_input, child_age)
    st.subheader("📊 Development Insights:")
    st.markdown(f"<div style='background-color:#44475a; color:#ffffff; padding: 15px; border-radius: 10px;'>{ai_response}</div>", unsafe_allow_html=True)
    pdf_file = generate_pdf_report(ai_response)
    with open(pdf_file, "rb") as f:
        st.download_button(label="📥 Download Progress Report", data=f, file_name="progress_report.pdf", mime="application/pdf")
    st.warning("⚠️ The results provided are generated by AI and should be interpreted with caution. Please consult a pediatrician for professional advice.")