Vela committed
Commit: 222b3f6 · Parent: 159af50

updated database_response page
Files changed:

- .dockerignore +6 -0
- README.md +1 -1
- src/backend/data/__pycache__/chroma_db.cpython-313.pyc +0 -0
- src/backend/data/chroma_db.py +2 -2
- src/backend/models/__pycache__/llm_model.cpython-313.pyc +0 -0
- src/backend/models/__pycache__/schemas.cpython-313.pyc +0 -0
- src/backend/models/llm_model.py +8 -6
- src/backend/models/schemas.py +1 -1
- src/backend/routes/__pycache__/chat_api.cpython-313.pyc +0 -0
- src/backend/routes/chat_api.py +4 -1
- src/frontend/app/__pycache__/common_fuctions.cpython-313.pyc +0 -0
- src/frontend/app/__pycache__/homepage.cpython-313.pyc +0 -0
- src/frontend/app/common_fuctions.py +1 -1
- src/frontend/app/homepage.py +1 -1
- src/frontend/pages/database_response_page.py +21 -8
- src/frontend/utils/__pycache__/logger.cpython-313.pyc +0 -0
.dockerignore
ADDED
```diff
@@ -0,0 +1,6 @@
+.venv
+.env
+vector-db
+src/backend/data/dataset.csv
+project_requirement
+logs
```
README.md
CHANGED
````diff
@@ -79,7 +79,7 @@ pip install -r requirements.txt
 To run yuvabe_care_companion_ai, execute the following command:
 
 ```bash
-streamlit run
+streamlit run src/frontend/home.py
 ```
 
 This will start the Streamlit server, and you should see output indicating the local URL where the app is being served, typically `http://localhost:8501`.
````
src/backend/data/__pycache__/chroma_db.cpython-313.pyc
CHANGED
Binary files a/src/backend/data/__pycache__/chroma_db.cpython-313.pyc and b/src/backend/data/__pycache__/chroma_db.cpython-313.pyc differ
src/backend/data/chroma_db.py
CHANGED
```diff
@@ -7,10 +7,10 @@ logger = logger.get_logger()
 
 # Constants
 COLLECTION_NAME = "care_companion_ai_vectors"
-DB_PATH = "vector-db"
+DB_PATH = "./src/backend/vector-db"
 
 # Initialize ChromaDB Client
-client = chromadb.PersistentClient(path=
+client = chromadb.PersistentClient(path=DB_PATH)
 
 collection = client.get_or_create_collection(
     name=COLLECTION_NAME,
```
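The diff above only shows the client and collection setup; `search_vector_store`, which the rest of this commit calls, is not among the changed lines. A minimal sketch of what it could look like, assuming it is a thin wrapper over ChromaDB's `collection.query` (the `n_results` parameter is an assumption, not code from this repo):

```python
import chromadb

COLLECTION_NAME = "care_companion_ai_vectors"
DB_PATH = "./src/backend/vector-db"

client = chromadb.PersistentClient(path=DB_PATH)
collection = client.get_or_create_collection(name=COLLECTION_NAME)

def search_vector_store(query: str, n_results: int = 3) -> dict:
    # Assumed wrapper: query() returns a dict with keys such as "ids",
    # "documents", "metadatas", and "distances"; the callers changed in
    # this commit read response["metadatas"][0].
    return collection.query(query_texts=[query], n_results=n_results)
```

The path fix matters because `PersistentClient` creates the directory if it does not exist, so the old relative `"vector-db"` would silently create an empty store whenever the process started from a different working directory.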
src/backend/models/__pycache__/llm_model.cpython-313.pyc
CHANGED
Binary files a/src/backend/models/__pycache__/llm_model.cpython-313.pyc and b/src/backend/models/__pycache__/llm_model.cpython-313.pyc differ
src/backend/models/__pycache__/schemas.cpython-313.pyc
CHANGED
Binary files a/src/backend/models/__pycache__/schemas.cpython-313.pyc and b/src/backend/models/__pycache__/schemas.cpython-313.pyc differ
src/backend/models/llm_model.py
CHANGED
```diff
@@ -22,12 +22,12 @@ SYSTEM_PROMPT = """You are Yuvabe Care Companion AI, an advanced healthcare assi
 ⚠️ *Important*: You are not a certified doctor. Always remind users to consult a healthcare professional for medical decisions.
 """
 
-def get_medical_assistant_response(prompt: str):
+def get_medical_assistant_response(prompt: list):
     try:
-        if not prompt or len(prompt
+        if not prompt or len(prompt[0]) < 5:
             return "⚠️ Your question seems too short. Please provide more details so I can assist you better."
-
-        response = chroma_db.search_vector_store(
+        querry = prompt[-1]
+        response = chroma_db.search_vector_store(querry)
 
         if response and "metadatas" in response and response["metadatas"]:
             retrieved_contexts = [metadata['answer'] for metadata in response["metadatas"][0]]
@@ -35,7 +35,7 @@ def get_medical_assistant_response(prompt: str):
         else:
             context = "No relevant information found in the database."
 
-
+        rag_prompt = f"""
         You are a helpful medical assistant. Use the provided context to answer the question as accurately as possible.
         If the context is not relevant, rely on your knowledge to answer.
 
@@ -44,11 +44,13 @@ def get_medical_assistant_response(prompt: str):
 
         User Question: {prompt}
         """
+        # messages.insert(0, {"role": "system", "content": SYSTEM_PROMPT})
+        # messages[-1]["content"] = rag_prompt
 
         chat_completion = client.chat.completions.create(
             messages=[
                 {"role": "system", "content": SYSTEM_PROMPT},
-                {"role": "user", "content":
+                {"role": "user", "content": rag_prompt},
             ],
             model=LLM_MODEL_NAME,
         )
```
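The signature change means callers now pass the whole conversation as a list; `prompt[-1]` is treated as the current question and is what gets searched, while the f-string template still interpolates the full `prompt` list rather than `querry`. A hypothetical call illustrating the new contract (the history contents are made up):

```python
# Hypothetical conversation history; the last element is the query that
# get_medical_assistant_response embeds and searches in the vector store.
history = [
    "What can cause sweating and shakiness after eating sweets?",
    "Does a family history of heart disease change that assessment?",
]
print(get_medical_assistant_response(history))
```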
src/backend/models/schemas.py
CHANGED
```diff
@@ -2,5 +2,5 @@ from pydantic import BaseModel
 from typing import List,Dict, Optional
 
 class Chat_Response(BaseModel):
-    prompt: Optional[
+    prompt: Optional[List] = None
     response: Optional[Dict] = None
```
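The `prompt` field is now a bare `Optional[List]` (the old annotation is truncated in the diff view), which is what lets the frontend send conversation history. A quick check of the loosened model (assuming Pydantic v2; on v1, `.dict()` replaces `.model_dump()`):

```python
from typing import Dict, List, Optional

from pydantic import BaseModel

class Chat_Response(BaseModel):
    prompt: Optional[List] = None   # any list is accepted, items untyped
    response: Optional[Dict] = None

req = Chat_Response(prompt=["How should I treat a mild burn at home?"])
print(req.model_dump())
# {'prompt': ['How should I treat a mild burn at home?'], 'response': None}
```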
src/backend/routes/__pycache__/chat_api.cpython-313.pyc
CHANGED
Binary files a/src/backend/routes/__pycache__/chat_api.cpython-313.pyc and b/src/backend/routes/__pycache__/chat_api.cpython-313.pyc differ
src/backend/routes/chat_api.py
CHANGED
```diff
@@ -21,7 +21,10 @@ async def get_assistant_response(chat_request: Chat_Response):
 @router.post("/db_response")
 async def get_db_response(chat_request: Chat_Response):
     try:
-
+        logger.info(f"Received user prompt: {chat_request.prompt}")
+        query = chat_request.prompt[-1]
+        response_text = chroma_db.search_vector_store(query)
+        logger.info(f"Retrieved context for user prompt: {chat_request.prompt[:50]}...")
         return {"status": "success", "response": response_text}
     except Exception as e:
         logger.exception("Unexpected error occurred while processing the request.")
```
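Assuming the router is mounted under a `/chat` prefix (the frontend posts to `/chat/db_response`), the endpoint can be exercised directly; the body must match `Chat_Response`, so `prompt` is a list:

```python
import requests

API_URL = "http://127.0.0.1:8000"  # the frontend's default backend URL

payload = {"prompt": ["I felt shaky and sweaty after eating two cookies."]}
resp = requests.post(f"{API_URL}/chat/db_response", json=payload)
resp.raise_for_status()

data = resp.json()
print(data["status"])    # "success"
print(data["response"])  # raw vector-store result, including "metadatas"
```

One side effect of the list-typed prompt: `chat_request.prompt[:50]` in the log line now slices up to 50 list elements, not 50 characters.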
src/frontend/app/__pycache__/common_fuctions.cpython-313.pyc
CHANGED
Binary files a/src/frontend/app/__pycache__/common_fuctions.cpython-313.pyc and b/src/frontend/app/__pycache__/common_fuctions.cpython-313.pyc differ
src/frontend/app/__pycache__/homepage.cpython-313.pyc
CHANGED
Binary files a/src/frontend/app/__pycache__/homepage.cpython-313.pyc and b/src/frontend/app/__pycache__/homepage.cpython-313.pyc differ
src/frontend/app/common_fuctions.py
CHANGED
```diff
@@ -19,7 +19,7 @@ def img_to_base64(image_path):
 API_URL = os.getenv("API_URL", "http://127.0.0.1:8000")
 
 
-def get_api_response(endpoint:str, prompt:
+def get_api_response(endpoint:str, prompt: list):
     try:
         response = requests.post(f"{API_URL}/{endpoint}", json={"prompt": prompt})
         if response.status_code == 200:
```
src/frontend/app/homepage.py
CHANGED
```diff
@@ -51,7 +51,7 @@ def handle_user_input():
         with st.spinner("Processing your query..."):
             try:
                 endpoint = "/chat/agent_response"
-                response = common_fuctions.get_api_response(endpoint, prompt)
+                response = common_fuctions.get_api_response(endpoint, [prompt])
             except Exception as e:
                 logger.error(f"AI response generation failed: {str(e)}")
                 response = "⚠️ Sorry, I couldn't process your request. Please try again later."
```
src/frontend/pages/database_response_page.py
CHANGED
```diff
@@ -2,15 +2,28 @@ import streamlit as st
 from app import homepage
 from app import common_fuctions
 
+SAMPLE_QUESTION_1 = "Last night I broke out in a sweat and got shaky then I felt like I was getting the flu so I went to bed at 7pm. I dont think I have the flu but I just ate two cookies and i got sweaty again. Im not feeling myself at all. I am 47 on hormones with a long history of family heart disease."
+SAMPLE_QUESTION_2 = " I am feeling really weak today I was vomiting yesterday all day with a high heart rate and sweating like crazy. I thought I was just hungover but I don t think that s the case. I also have a small bump on my forehead that is swollen and tender to the touch."
+SAMPLE_QUESTION_3 = "I just woke up now, It s 7pm. It was a bit of an offsleep, but when i woke up i felt really weak, shaky, almost lighter. I m still shaking now. I tried just waking up a bit, Drinking some water and milk, Went to the bathroom. But i still fell weak shaky and light"
+
 homepage.config_homepage()
 st.title("Database Response")
 st.write("This page is used to get the response from the database")
 prompt = st.text_input("Enter your prompt")
-if 
-
-
-
-
-
-
-
+if prompt == "":
+    selected_option = st.selectbox("Select a sample question", ["",SAMPLE_QUESTION_1, SAMPLE_QUESTION_2, SAMPLE_QUESTION_3])
+    if selected_option not in ["", None]:
+        prompt = selected_option
+if prompt:
+    if st.button("Get DB response"):
+        endpoint = "/chat/db_response"
+        response = common_fuctions.get_api_response(endpoint, [prompt])
+        st.subheader("✅ Relevant question and answer pair found in the database.")
+        for metadata_group in response["metadatas"]:
+            for entry in metadata_group:
+                st.write("Question:", entry["question"])
+                st.write("Answer:", entry["answer"])
+                st.write("-" * 80)
+
+if st.button("Clear chat"):
+    st.rerun()
```
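The nested loop mirrors ChromaDB's result layout: `"metadatas"` holds one inner list per query, and each entry is a metadata dict carrying a stored question/answer pair. A hypothetical result of that shape, showing what the page renders (the Q&A content is invented for illustration):

```python
# Hypothetical vector-store result in the shape the page iterates over.
response = {
    "metadatas": [  # one inner list per query text
        [
            {"question": "Why do I feel shaky after eating sugar?",
             "answer": "Reactive hypoglycemia is one possibility; consult a doctor."},
            {"question": "When should sweating episodes be evaluated?",
             "answer": "If they recur or come with other symptoms, seek medical care."},
        ]
    ]
}

for metadata_group in response["metadatas"]:
    for entry in metadata_group:
        print("Question:", entry["question"])
        print("Answer:", entry["answer"])
        print("-" * 80)
```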
src/frontend/utils/__pycache__/logger.cpython-313.pyc
CHANGED
Binary files a/src/frontend/utils/__pycache__/logger.cpython-313.pyc and b/src/frontend/utils/__pycache__/logger.cpython-313.pyc differ