Oleh Shliazhko committed
Commit
365eb64
1 Parent(s): f41f058

data and retriever module

.gitattributes CHANGED
@@ -52,3 +52,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ *.idx filter=lfs diff=lfs merge=lfs -text
data/en/embs_IVF16384_HNSW32_2lvl_full.idx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4c8a9b6f6233a8927bc3981a9a84a86ab300cd4d6c9c381132fb9bcee82f3d4
+ size 17268102277
data/en/paragraphs.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01430661566694a84fd77b7a9a9524525a53e87b1b6fe937f271f37a2e73678d
+ size 7231592115
wiki_mpnet_index.py ADDED
@@ -0,0 +1,39 @@
+ import faiss
+ import json
+ import numpy as np
+ import zipfile
+
+ from sentence_transformers import SentenceTransformer
+
+
+ class Retriever:
+     def __init__(self):
+         # IVF16384/HNSW32 FAISS index over the paragraph embeddings added above.
+         self.index = faiss.read_index("data/en/embs_IVF16384_HNSW32_2lvl_full.idx")
+         self.index.nprobe = 128
+         # Query encoder; must match the model used to build the index.
+         self.model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2', device='cuda')
+         self.model.max_seq_length = 512
+
+     def get_paragraph_by_vec_idx(self, vec_idx):
+         # Paragraphs are stored as zipped JSONL chunks of 100,000 lines each,
+         # so a vector index maps directly to a (chunk, line) pair.
+         archive = zipfile.ZipFile('data/en/paragraphs.zip', 'r')
+         chunk_id = vec_idx // 100000
+         line_id = vec_idx % 100000
+         with archive.open('enwiki_paragraphs_clean/enwiki_paragraphs_%03d.jsonl' % chunk_id) as f:
+             for i, l in enumerate(f):
+                 if i == line_id:
+                     data = json.loads(l)
+                     break
+         return data
+
+     def search(self, query, k=5):
+         # Encode the query and look up its k nearest paragraph vectors.
+         emb = self.model.encode(query)
+         _, neighbors = self.index.search(emb[np.newaxis, ...], k)
+         results = []
+         for n in neighbors[0]:
+             data = self.get_paragraph_by_vec_idx(n)
+             results.append(data)
+         return results
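
For context, a minimal usage sketch of the new module (a sketch only: it assumes the script is run from the repository root with the LFS data files pulled and a CUDA device available; the query string and k are purely illustrative):

import wiki_mpnet_index

# Loads the FAISS index and the mpnet query encoder from data/en/.
retriever = wiki_mpnet_index.Retriever()

# Fetch the 5 nearest Wikipedia paragraphs for a free-text query.
for paragraph in retriever.search("Who wrote the novel War and Peace?", k=5):
    print(paragraph)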