Commit 46f3c1e
Parent(s): af2b393

Upload wikiindexquery.py with huggingface_hub

Files changed: wikiindexquery.py (+97, -0)
wikiindexquery.py
ADDED
@@ -0,0 +1,97 @@
# Paths to the prebuilt FAISS index and the sentence-level Wikipedia parquet.
indexpath = "./wiki-index/knn.index"
wiki_sentence_path = "wikipedia-en-sentences.parquet"
# wiki_fulltext_path = "wikipedia-en.parquet"

import faiss
import numpy as np
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModel

# Show full snippet text when printing pandas objects.
pd.set_option("display.max_colwidth", 1000)

# Contriever retriever fine-tuned on MS MARCO; used to embed both queries and snippets.
tokenizer = AutoTokenizer.from_pretrained('facebook/contriever-msmarco')
contriever = AutoModel.from_pretrained('facebook/contriever-msmarco')

device = 'cuda' if torch.cuda.is_available() else 'cpu'
contriever.to(device)
print(device)

def cos_sim_2d(x, y):
    """Pairwise cosine similarity between the rows of two 2-D arrays."""
    norm_x = x / np.linalg.norm(x, axis=1, keepdims=True)
    norm_y = y / np.linalg.norm(y, axis=1, keepdims=True)
    return np.matmul(norm_x, norm_y.T)
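
# Example with hypothetical shapes: for x of shape (1, 768) and y of shape
# (3, 768), cos_sim_2d(x, y) returns a (1, 3) matrix of cosine similarities,
# one entry per row pair, with identical rows scoring 1.0.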

# Mean pooling over token embeddings, ignoring padded positions.
def mean_pooling(token_embeddings, mask):
    token_embeddings = token_embeddings.masked_fill(~mask[..., None].bool(), 0.)
    sentence_embeddings = token_embeddings.sum(dim=1) / mask.sum(dim=1)[..., None]
    return sentence_embeddings
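
# Hypothetical illustration: for two sequences padded to length 6 with 4 and 6
# real tokens, mask.sum(dim=1) is tensor([4, 6]), so each sentence embedding
# averages only that sequence's real token vectors.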

print("loading df")
df_sentences = pd.read_parquet(wiki_sentence_path, engine='fastparquet')
# df_fulltext = pd.read_parquet(wiki_fulltext_path, engine='fastparquet')

# Memory-map the index instead of loading it fully into RAM.
my_index = faiss.read_index(indexpath, faiss.IO_FLAG_MMAP | faiss.IO_FLAG_READ_ONLY)

query = ""
while query != "q":
    query = input("Type in your query (q to quit): ")
    if query == "q":
        break
    print("Query Text: ", query)

    # Embed the query: tokenize, forward pass through Contriever, mean-pool.
    inputs = tokenizer([query], padding=True, truncation=True, return_tensors="pt").to(device)
    outputs = contriever(**inputs)
    embeddings = mean_pooling(outputs[0], inputs['attention_mask'])
    query_vector = np.asarray(embeddings.cpu().detach().numpy()).reshape(1, 768)

    # Retrieve the k nearest neighbours of the query vector.
    k = 5
    distances, indices = my_index.search(query_vector, k)

    print(f"Top {k} elements in the dataset for max inner product search:")
    for i, (dist, indice) in enumerate(zip(distances[0], indices[0])):
        print(f"{i+1}: Vector number {indice:4} with distance {dist}")
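
        # Note: with an inner-product index (as the message above suggests),
        # larger "distances" mean more similar vectors, unlike L2 distances.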

        # Start from the retrieved snippet, then try to merge in its
        # neighbouring snippets when they are semantically close.
        text = str(df_sentences['text_snippet'].iloc[indice])
        # Get embeddings of the neighbouring 3-sentence segments.
        try:
            inputs = tokenizer([str(df_sentences['text_snippet'].iloc[indice - 1]),
                                str(df_sentences['text_snippet'].iloc[indice]),
                                str(df_sentences['text_snippet'].iloc[indice + 1])],
                               padding=True, truncation=True, return_tensors="pt").to(device)
            outputs = contriever(**inputs)
            embeddings = mean_pooling(outputs[0], inputs['attention_mask'])
            embeddings = np.asarray(embeddings.cpu().detach().numpy())

            # Prepend the previous snippet if it is similar to the hit.
            if cos_sim_2d(embeddings[0].reshape(1, 768), embeddings[1].reshape(1, 768)) > 0.7:
                text = str(df_sentences['text_snippet'].iloc[indice - 1]) + " " + text

            # Append the next snippet if it is similar to the hit.
            if cos_sim_2d(embeddings[1].reshape(1, 768), embeddings[2].reshape(1, 768)) > 0.7:
                text += " " + str(df_sentences['text_snippet'].iloc[indice + 1])

        except Exception:
            # Neighbour lookups can fall outside the DataFrame at its boundaries.
            pass

        print(text)
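
For context: the script assumes two prebuilt artifacts that are not part of this commit, a sentence-level parquet with a text_snippet column and a FAISS inner-product index whose row order matches the DataFrame. The sketch below shows one way such artifacts could be produced with the same model and pooling; the file name build_wiki_index.py, the sample snippets, and the batch size are illustrative assumptions, not taken from this repository.

# build_wiki_index.py -- hypothetical companion script, not part of this commit.
import os

import faiss
import numpy as np
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained('facebook/contriever-msmarco')
contriever = AutoModel.from_pretrained('facebook/contriever-msmarco')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
contriever.to(device).eval()

def mean_pooling(token_embeddings, mask):
    token_embeddings = token_embeddings.masked_fill(~mask[..., None].bool(), 0.)
    return token_embeddings.sum(dim=1) / mask.sum(dim=1)[..., None]

# Illustrative snippets; the real pipeline would emit one row per
# 3-sentence segment of the Wikipedia dump.
snippets = ["The mitochondrion is the powerhouse of the cell.",
            "Paris is the capital of France.",
            "FAISS is a library for efficient similarity search."]
df = pd.DataFrame({"text_snippet": snippets})
df.to_parquet("wikipedia-en-sentences.parquet", engine="fastparquet")

vectors = []
with torch.no_grad():
    for start in range(0, len(snippets), 64):  # batch size 64 is an assumption
        batch = snippets[start:start + 64]
        inputs = tokenizer(batch, padding=True, truncation=True,
                           return_tensors="pt").to(device)
        emb = mean_pooling(contriever(**inputs)[0], inputs['attention_mask'])
        vectors.append(emb.cpu().numpy())
vectors = np.concatenate(vectors).astype("float32")

# Inner-product index; row i of the index corresponds to row i of the parquet.
index = faiss.IndexFlatIP(vectors.shape[1])
index.add(vectors)
os.makedirs("./wiki-index", exist_ok=True)
faiss.write_index(index, "./wiki-index/knn.index")

IndexFlatIP performs exact inner-product search; a Wikipedia-scale deployment would more likely use an approximate variant (e.g. IVF or HNSW), which faiss.read_index loads the same way.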