rag-homework / backend / semantic_search.py
import os

import gradio as gr
import lancedb
from dotenv import load_dotenv
from sentence_transformers import SentenceTransformer, CrossEncoder

load_dotenv()

# LanceDB connection and table configuration, all taken from the environment
DB_PATH = os.getenv("DB_PATH", ".lancedb")
db = lancedb.connect(DB_PATH)

TABLE_NAME = os.getenv("TABLE_NAME")
if not TABLE_NAME:
    raise ValueError("TABLE_NAME environment variable is not set")
TABLE = db.open_table(TABLE_NAME)

VECTOR_COLUMN = os.getenv("VECTOR_COLUMN", "vector")
TEXT_COLUMN = os.getenv("TEXT_COLUMN", "text")
BATCH_SIZE = int(os.getenv("BATCH_SIZE", 32))

# Sentence-transformer used to embed queries; EMB_MODEL must be set in the environment
retriever = SentenceTransformer(os.getenv("EMB_MODEL"))


def retrieve(query, k):
    """Embed the query, run a vector search against LanceDB, and rerank the hits."""
    query_vec = retriever.encode(query)
    try:
        documents = TABLE.search(query_vec, vector_column_name=VECTOR_COLUMN).limit(k).to_list()
        documents = [doc[TEXT_COLUMN] for doc in documents]
        documents = reranking(query, documents)
        return documents
    except Exception as e:
        raise gr.Error(str(e))


def reranking(query, retrieval_result):
    """Rerank retrieved passages with a cross-encoder and return them best-first."""
    model_name = 'BAAI/bge-reranker-large'
    # model_name = 'cross-encoder/ms-marco-MiniLM-L-6-v2'
    # Note: the cross-encoder is reloaded on every call; caching it at module level would be faster.
    model = CrossEncoder(model_name, max_length=512)
    # Prepare the list of (query, document) pairs for the cross-encoder
    pairs = [(query, curr) for curr in retrieval_result]
    scores = model.predict(pairs)
    # Sort the documents by relevance score, highest first
    scored_pairs = sorted(zip(scores, retrieval_result), key=lambda pair: pair[0], reverse=True)
    return [pair[1] for pair in scored_pairs]
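

# --- Usage sketch (not part of the original module) ---
# A minimal example of how retrieve() might be called once DB_PATH, TABLE_NAME and
# EMB_MODEL are set; the query string and k value below are placeholders, not values
# taken from the original project.
if __name__ == "__main__":
    results = retrieve("What is retrieval-augmented generation?", k=5)
    for rank, passage in enumerate(results, start=1):
        print(f"{rank}. {passage[:120]}")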