Update app.py
Browse files
app.py
CHANGED
@@ -29,14 +29,16 @@ embedding_model = SentenceTransformer("all-MiniLM-L6-v2", cache_folder="/tmp/hug
|
|
29 |
summarization_model = AutoModelForSeq2SeqLM.from_pretrained("google/long-t5-tglobal-base", cache_dir="/tmp/huggingface")
|
30 |
summarization_tokenizer = AutoTokenizer.from_pretrained("google/long-t5-tglobal-base", cache_dir="/tmp/huggingface")
|
31 |
|
|
|
|
|
32 |
|
33 |
-
# ✅ Load datasets
|
34 |
try:
|
35 |
-
|
36 |
-
|
37 |
except FileNotFoundError as e:
|
38 |
-
logging.error(f"Missing dataset file: {e}")
|
39 |
-
raise HTTPException(status_code=500, detail="Dataset file not found")
|
40 |
|
41 |
# ✅ FAISS Index for disorder detection
|
42 |
treatment_embeddings = similarity_model.encode(recommendations_df["Disorder"].tolist(), convert_to_numpy=True)
|
|
|
29 |
summarization_model = AutoModelForSeq2SeqLM.from_pretrained("google/long-t5-tglobal-base", cache_dir="/tmp/huggingface")
|
30 |
summarization_tokenizer = AutoTokenizer.from_pretrained("google/long-t5-tglobal-base", cache_dir="/tmp/huggingface")
|
31 |
|
32 |
+
# ✅ Check if files exist before loading
|
33 |
+
print("📂 Available Files:", os.listdir(".")) # This will log available files
|
34 |
|
35 |
+
# ✅ Load datasets with error handling
|
36 |
try:
|
37 |
+
recommendations_df = pd.read_csv("treatment_recommendations.csv")
|
38 |
+
questions_df = pd.read_csv("symptom_questions.csv")
|
39 |
except FileNotFoundError as e:
|
40 |
+
logging.error(f"❌ Missing dataset file: {e}")
|
41 |
+
raise HTTPException(status_code=500, detail=f"Dataset file not found: {str(e)}")
|
42 |
|
43 |
# ✅ FAISS Index for disorder detection
|
44 |
treatment_embeddings = similarity_model.encode(recommendations_df["Disorder"].tolist(), convert_to_numpy=True)
|