drkareemkamal committed · verified
Commit c2ea418 · 1 Parent(s): f83b33f

Upload 4 files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+vectorstores/index.faiss filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,93 @@
+# Medical chatbot: RetrievalQA over a local FAISS index, served with Streamlit.
+from langchain_core.prompts import PromptTemplate
+from langchain_community.embeddings import HuggingFaceBgeEmbeddings
+from langchain_community.vectorstores import FAISS
+from langchain_community.llms.ctransformers import CTransformers
+from langchain.chains.retrieval_qa.base import RetrievalQA
+import streamlit as st
+
+DB_FAISS_PATH = 'vectorstores/'
+
+custom_prompt_template = '''Use the following pieces of information to answer the user's question.
+If you don't know the answer, just say that you don't know; don't try to make up an answer.
+
+Context: {context}
+Question: {question}
+
+Only return the helpful answer below and nothing else.
+'''
+
+
+def set_custom_prompt():
+    """Prompt template for QA retrieval over the vector store."""
+    prompt = PromptTemplate(template=custom_prompt_template,
+                            input_variables=['context', 'question'])
+    return prompt
+
+
+def load_llm():
+    """Load the quantized Llama 2 chat model via ctransformers."""
+    llm = CTransformers(
+        model='TheBloke/Llama-2-7B-Chat-GGML',
+        model_type='llama',
+        config={'max_new_tokens': 512, 'temperature': 0.5}
+    )
+    return llm
+
+
+def retrieval_qa_chain(llm, prompt, db):
+    """Build a 'stuff' RetrievalQA chain over the FAISS retriever."""
+    qa_chain = RetrievalQA.from_chain_type(
+        llm=llm,
+        chain_type='stuff',
+        retriever=db.as_retriever(search_kwargs={'k': 2}),
+        return_source_documents=True,
+        chain_type_kwargs={'prompt': prompt}
+    )
+    return qa_chain
+
+
+def qa_bot():
+    """Assemble embeddings, FAISS store, LLM and prompt into a QA chain."""
+    embeddings = HuggingFaceBgeEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
+                                          model_kwargs={'device': 'cpu'})
+    db = FAISS.load_local(DB_FAISS_PATH, embeddings, allow_dangerous_deserialization=True)
+    llm = load_llm()
+    qa_prompt = set_custom_prompt()
+    qa = retrieval_qa_chain(llm, qa_prompt, db)
+    return qa
+
+
+def final_result(query):
+    """Run a single query through a freshly built QA chain."""
+    qa_result = qa_bot()
+    response = qa_result.invoke({'query': query})
+    return response
+
+
+# Initialize the bot once when the app starts
+bot = qa_bot()
+
+
+def process_query(query):
+    """Answer a query and append its source documents, if any."""
+    result = bot.invoke({'query': query})
+    response = result['result']
+    sources = result.get('source_documents', [])
+    if sources:
+        source_names = ', '.join(doc.metadata.get('source', 'unknown')
+                                 for doc in sources)
+        response += f"\nSources: {source_names}"
+    else:
+        response += "\nNo Sources Found"
+    return response
+
+
+# Setting up the Streamlit app
+st.title('Medical Chatbot')
+
+user_input = st.text_input("Hi, welcome to the Medical Bot. What is your query?")
+
+if user_input:
+    output = process_query(user_input)
+    st.text_area("Response", output, height=300)
requirements.txt ADDED
@@ -0,0 +1,11 @@
+pypdf
+langchain
+torch
+accelerate
+bitsandbytes
+transformers
+sentence_transformers
+faiss_cpu
+langchain-community
+huggingface_hub
+ctransformers
vectorstores/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ceac52af31d17a599afdeaa78b5309e58f242078efcd723b604cbdf2be45cb75
+size 10983981
vectorstores/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bad34adb5061873cd15a1f7e86541e9818502b235c53793e8b284884d77336dd
+size 3446300
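The index.faiss/index.pkl pair is a FAISS store saved via LangChain's FAISS.save_local. The ingestion script is not part of this commit; the sketch below only illustrates how such a store could be built, assuming the same MiniLM embeddings used in app.py and a hypothetical data/ folder of PDFs.

# Ingestion sketch (assumption): one way to produce vectorstores/index.faiss and
# index.pkl. The data/ directory and chunking parameters are hypothetical; only
# the embedding model and the output path mirror app.py.
from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import FAISS

documents = DirectoryLoader('data/', glob='*.pdf', loader_cls=PyPDFLoader).load()
chunks = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_documents(documents)

embeddings = HuggingFaceBgeEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                      model_kwargs={'device': 'cpu'})
db = FAISS.from_documents(chunks, embeddings)
db.save_local('vectorstores/')  # writes index.faiss and index.pkl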