mindspark121 committed on
Commit 01cd8f4 · verified · 1 Parent(s): 5a89e45

Update app.py

Files changed (1)
  1. app.py +7 -5
app.py CHANGED
@@ -29,14 +29,16 @@ embedding_model = SentenceTransformer("all-MiniLM-L6-v2", cache_folder="/tmp/hug
 summarization_model = AutoModelForSeq2SeqLM.from_pretrained("google/long-t5-tglobal-base", cache_dir="/tmp/huggingface")
 summarization_tokenizer = AutoTokenizer.from_pretrained("google/long-t5-tglobal-base", cache_dir="/tmp/huggingface")
 
+# ✅ Check if files exist before loading
+print("🔍 Available Files:", os.listdir("."))  # This will log available files
 
-# ✅ Load datasets
+# ✅ Load datasets with error handling
 try:
-    recommendations_df = pd.read_csv("treatment_recommendations.csv")
-    questions_df = pd.read_csv("symptom_questions.csv")
+    recommendations_df = pd.read_csv("treatment_recommendations.csv")
+    questions_df = pd.read_csv("symptom_questions.csv")
 except FileNotFoundError as e:
-    logging.error(f"Missing dataset file: {e}")
-    raise HTTPException(status_code=500, detail="Dataset files are missing.")
+    logging.error(f"❌ Missing dataset file: {e}")
+    raise HTTPException(status_code=500, detail=f"Dataset file not found: {str(e)}")
 
 # ✅ FAISS Index for disorder detection
 treatment_embeddings = similarity_model.encode(recommendations_df["Disorder"].tolist(), convert_to_numpy=True)
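The hunk ends right after the disorder embeddings are computed and before any FAISS index is actually constructed. Below is a minimal sketch of how such an index is typically built and queried from those embeddings; the IndexFlatL2 choice, the sample query text, and the k=1 lookup are illustrative assumptions and are not part of this commit.

import faiss
import numpy as np

# Assumption: treatment_embeddings is the (num_disorders, dim) array returned by
# similarity_model.encode(..., convert_to_numpy=True) in the line above.
embeddings = np.asarray(treatment_embeddings, dtype="float32")

# Hypothetical index type: an exact (flat) L2 index over the disorder embeddings.
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(embeddings)

# Usage sketch: embed a symptom description and retrieve the closest disorder.
query = similarity_model.encode(
    ["I feel anxious and cannot sleep"], convert_to_numpy=True
).astype("float32")
distances, ids = index.search(query, 1)
closest_disorder = recommendations_df["Disorder"].iloc[ids[0][0]]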