removing Thread
app.py CHANGED
@@ -32,12 +32,14 @@ from huggingface_hub import login
 
 login(token=HF_KEY)
 
+vectorstore=None
+
 class BSIChatbot:
     def __init__(self, model_paths: Dict[str, str], docs_path: str):
         self.embedding_model = None
         self.llmpipeline = None
         self.llmtokenizer = None
-        self.vectorstore = None
+        #self.vectorstore = None
         self.reranking_model = None
         self.streamer = None
         self.images = [None]
@@ -50,7 +52,7 @@ class BSIChatbot:
     @spaces.GPU
     def initialize_embedding_model(self, rebuild_embeddings: bool):
         raw_knowledge_base = []
-
+        global vectorstore
         # Initialize embedding model
         self.embedding_model = HuggingFaceEmbeddings(
             model_name=self.word_and_embed_model_path,
@@ -91,18 +93,19 @@ class BSIChatbot:
             self.vectorstore.save_local(os.path.join(self.docs, "_embeddings"))
         else:
             # Load existing vector store
-
+            vectorstore = FAISS.load_local(os.path.join(self.docs, "_embeddings"), self.embedding_model, allow_dangerous_deserialization=True)
         print("DBG: Vectorstore Status Initialization:", self.vectorstore)
 
     @spaces.GPU
     def retrieve_similar_embedding(self, query: str):
+        global vectorstore
         #lazy load
         #if (self.vectorstore == None):
         #    self.vectorstore = FAISS.load_local(os.path.join(self.docs, "_embeddings"), self.embedding_model,
         #                                        allow_dangerous_deserialization=True)
         print("DBG: Vectorstore Status retriever:", self.vectorstore)
         query = f"Instruct: Given a search query, retrieve the relevant passages that answer the query\nQuery:{query}"
-        return
+        return vectorstore.similarity_search(query=query, k=20)
 
     @spaces.GPU
     def initialize_llm(self):