# Semantic search over Wikipedia sentence snippets: queries are embedded with
# Contriever and matched against a prebuilt FAISS k-NN index.
indexpath = "./wiki-index/knn.index"
wiki_sentence_path = "wikipedia-en-sentences.parquet"
# wiki_fulltext_path = "wikipedia-en.parquet"

import faiss
import numpy as np
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModel

pd.set_option("display.max_colwidth", 1000)

# Contriever retriever fine-tuned on MS MARCO, used to embed both queries and snippets
tokenizer = AutoTokenizer.from_pretrained('facebook/contriever-msmarco')
contriever = AutoModel.from_pretrained('facebook/contriever-msmarco')


device = 'cuda' if torch.cuda.is_available() else 'cpu'
contriever.to(device)




def cos_sim_2d(x, y):
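    """Row-wise cosine similarity between two 2-D arrays of shape (n, d) and (m, d)."""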
    norm_x = x / np.linalg.norm(x, axis=1, keepdims=True)
    norm_y = y / np.linalg.norm(y, axis=1, keepdims=True)
    return np.matmul(norm_x, norm_y.T)



print(device)


# Mean pooling
def mean_pooling(token_embeddings, mask):
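    """Average token embeddings over the sequence dimension, ignoring padded positions."""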
    token_embeddings = token_embeddings.masked_fill(~mask[..., None].bool(), 0.)
    sentence_embeddings = token_embeddings.sum(dim=1) / mask.sum(dim=1)[..., None]
    return sentence_embeddings
print("loading df")

df_sentences = pd.read_parquet(wiki_sentence_path, engine='fastparquet')
# df_fulltext = pd.read_parquet(wiki_fulltext_path, engine='fastparquet')


# Memory-map the FAISS index from disk instead of loading it fully into RAM
my_index = faiss.read_index(indexpath, faiss.IO_FLAG_MMAP | faiss.IO_FLAG_READ_ONLY)

query = ""

# Interactive search loop: embed the query with Contriever and search the index; type "q" to quit.
while query != "q":

    query = input("Type in your query: ")
    print("Query Text: ", query)
    inputs = tokenizer([query], padding=True, truncation=True, return_tensors="pt").to(device)

    outputs = contriever(**inputs)
    embeddings = mean_pooling(outputs[0], inputs['attention_mask'])

    query_vector = np.asarray(embeddings.cpu().detach().numpy()).reshape(1, 768)

    # print(query_vector.shape)

    # Retrieve the k nearest snippet vectors from the FAISS index
    k = 5
    distances, indices = my_index.search(query_vector, k)

    print(f"Top {k} elements in the dataset for max inner product search:")
    for i, (dist, indice) in enumerate(zip(distances[0], indices[0])):
        print(f"{i+1}: Vector number {indice:4} with distance {dist}")

        text = str(df_sentences.iloc[[indice]]['text_snippet'])
        # get the embeddings of the hit and its neighbouring snippets (a 3-sentence window)
        try:
            inputs = tokenizer([str(df_sentences.iloc[[indice - 1]]['text_snippet']), str(df_sentences.iloc[[indice]]['text_snippet']), str(df_sentences.iloc[[indice + 1]]['text_snippet'])], padding=True, truncation=True, return_tensors="pt").to(device)
            outputs = contriever(**inputs)
            embeddings = mean_pooling(outputs[0], inputs['attention_mask'])
            embeddings = np.asarray(embeddings.cpu().detach().numpy())
            # print(embeddings.shape)

            # prepend the previous snippet if it is semantically close to the hit
            # print(cos_sim_2d(embeddings[0].reshape(1, 768), embeddings[1].reshape(1, 768)))
            if cos_sim_2d(embeddings[0].reshape(1, 768), embeddings[1].reshape(1, 768)) > 0.7:
                text = str(df_sentences.iloc[[indice - 1]]['text_snippet']) + " " + str(df_sentences.iloc[[indice]]['text_snippet'])

            # append the next snippet if it is semantically close to the hit
            # print(cos_sim_2d(embeddings[1].reshape(1, 768), embeddings[2].reshape(1, 768)))
            if cos_sim_2d(embeddings[1].reshape(1, 768), embeddings[2].reshape(1, 768)) > 0.7:
                text += " " + str(df_sentences.iloc[[indice + 1]]['text_snippet'])

        except Exception:
            # a neighbour lookup can fail at the edges of the table; keep the single snippet
            pass

        print(text)