Deepak Sahu committed · Commit 060a333
1 Parent(s): 32b22f3

adding augmented generation
Files changed:
- app.py (+3, -1)
- z_generate.py (+45, -2)
app.py
CHANGED
@@ -16,7 +16,9 @@ Manually Downloaded as HTML files:
 
 ## Details
 
-1. Vector Store is built using FAISS prior to starting this app. Although the vector store size in KBs but
+1. Vector Store is built using FAISS prior to starting this app. Although the vector store size is only in KBs,
+    - the creation and loading of the store takes ~10GB RAM and lasts 5 mins. Hence **NOT BUILDING IT DURING RUNTIME OF APP**.
+    - `multi_process=True` was not working with HF Space `free-tier`.
 
 '''
 
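A minimal sketch of the offline build step this note describes, assuming LangChain's FAISS wrapper (consistent with the `similarity_search`/`page_content` calls in z_generate.py below); the embedding model, paths, and chunk sizes are illustrative, not taken from this repo:

```python
# build_store.py: hypothetical one-off script, run before the Space starts.
from langchain_community.document_loaders import BSHTMLLoader
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2",  # assumed model, not from the repo
    multi_process=False,  # per the note above, multi_process=True breaks on free-tier
)

# Load the manually downloaded HTML files and split them into chunks.
docs = BSHTMLLoader("data/page.html").load()  # hypothetical path
chunks = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=100
).split_documents(docs)

# The heavy part (~10GB RAM, ~5 mins) happens here, offline.
FAISS.from_documents(chunks, embeddings).save_local("vector_store")

# At app startup only the prebuilt index is loaded:
vs_text = FAISS.load_local("vector_store", embeddings, allow_dangerous_deserialization=True)
```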
z_generate.py
CHANGED
@@ -26,5 +26,48 @@ class ServerlessInference:
 
     def perform_rag(self, query:str):
         # First perform text search
-
-
+        # Retrieval
+        retrieved_docs = self.vs_text.similarity_search(query=query, k=5)
+        retrieved_docs_text = [doc.page_content for doc in retrieved_docs]  # We only need the text of the documents
+        context = "\nExtracted documents:\n"
+        context += "".join([f"Document {str(i)}:::\n" + doc for i, doc in enumerate(retrieved_docs_text)])
+
+        # Augmented Generation
+        messages: list = [
+            {
+                "role": "system",
+                "content": """Using the information contained in the context,
+
+give a comprehensive answer to the question.
+
+Respond only to the question asked, response should be concise and relevant to the question.
+
+Provide the number of the source document when relevant.
+
+If the answer cannot be deduced from the context, do not give an answer.""",
+
+            },
+
+            {
+                "role": "user",
+                "content": """Context:
+
+{context}
+
+---
+
+Now here is the question you need to answer.
+
+Question: {question}""".format(context=context, question=query),
+
+            },
+        ]
+
+        completion = self.client.chat.completions.create(
+            model=self.model,
+            messages=messages,
+            max_tokens=500
+        )
+
+        response_text = completion.choices[0].message.content
+        return response_text
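For orientation, a hedged sketch of the wiring perform_rag() assumes. The constructor below is invented (the real one is not in this diff), the model id is a placeholder, and huggingface_hub's InferenceClient is assumed because it exposes the OpenAI-style chat.completions.create(...) call used above:

```python
# Hypothetical wiring, inferred from the attributes the method uses
# (self.vs_text, self.client, self.model); none of this is in the commit itself.
from huggingface_hub import InferenceClient
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings

class ServerlessInference:
    def __init__(self, vs_text, model: str = "mistralai/Mistral-7B-Instruct-v0.3"):  # placeholder id
        self.vs_text = vs_text           # FAISS store built/loaded before the app starts
        self.model = model
        self.client = InferenceClient()  # reads HF_TOKEN from the env; OpenAI-compatible chat API

    # perform_rag(self, query:str), exactly as added in this commit, goes here.

# The embeddings must match the model used when the store was built (assumed name):
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vs_text = FAISS.load_local("vector_store", embeddings, allow_dangerous_deserialization=True)
rag = ServerlessInference(vs_text)
# rag.perform_rag("What is this Space about?")  -> concise answer citing "Document i:::" sources
```

Loading the prebuilt index at startup is what lets the Space skip the expensive build step described in app.py.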