aisuko committed
Commit bbf209b
1 Parent(s): 84b35ff

update the README.md

Files changed (1)
  1. README.md +57 -0
README.md CHANGED
@@ -8,3 +8,60 @@ The original data from http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz.

We use the `nq-distilbert-base-v1` model to encode all the data into PyTorch tensors, and normalize the embeddings with `sentence_transformers.util.normalize_embeddings`.

```python
!pip install sentence-transformers==2.3.1
```

```python
import os
import json
import gzip

from sentence_transformers import SentenceTransformer
from sentence_transformers.util import http_get, normalize_embeddings

os.environ['DATASET_NAME'] = 'simplewiki-2020-11-01.jsonl.gz'
os.environ['DATASET_URL'] = 'http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz'

# Defined here but not used below
os.environ['MODEL_NAME'] = 'multi-qa-MiniLM-L6-cos-v1'
os.environ['CROSS_CODE_NAME'] = 'cross-encoder/ms-marco-MiniLM-L-6-v2'

# Download the compressed JSONL dump of Simple English Wikipedia
http_get(os.getenv('DATASET_URL'), os.getenv('DATASET_NAME'))

passages = []
with gzip.open(os.getenv('DATASET_NAME'), 'rt', encoding='utf-8') as fIn:
    for line in fIn:
        data = json.loads(line.strip())

        # Alternative: add all paragraphs
        # passages.extend(data['paragraphs'])

        # Alternative: only add the first paragraph
        # passages.append(data['paragraphs'][0])

        for paragraph in data['paragraphs']:
            # We encode the passages as [title, text] pairs
            passages.append([data['title'], paragraph])

print('Passages:', len(passages))

bi_encoder = SentenceTransformer('nq-distilbert-base-v1')
bi_encoder.max_seq_length = 256
bi_encoder.to('cuda')  # assumes a CUDA-capable GPU

corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True).to('cuda')
corpus_embeddings = normalize_embeddings(corpus_embeddings)
print('Embeddings:', len(corpus_embeddings))


import pandas as pd

# Persist the embeddings as a CSV file, one row per passage
embedding_data = pd.DataFrame(corpus_embeddings.cpu())
embedding_data.to_csv('simple_english_wikipedia_2020_11_01.csv', index=False)
```
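
The call to `normalize_embeddings` makes every embedding unit-length, so cosine similarity between two passages reduces to a plain dot product. A minimal sketch illustrating this, using two arbitrary example sentences:

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import normalize_embeddings

model = SentenceTransformer('nq-distilbert-base-v1')

# Two arbitrary sentences, just to show the effect of normalization
embeddings = model.encode(['Berlin is in Germany.', 'Paris is in France.'], convert_to_tensor=True)
embeddings = normalize_embeddings(embeddings)

# Every row now has (approximately) unit length ...
print(embeddings.norm(dim=1))
# ... so the dot product between rows equals their cosine similarity
print(embeddings[0] @ embeddings[1])
```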
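
For completeness, a sketch of how the exported CSV could be loaded back and queried with `sentence_transformers.util.semantic_search`; the query string is a made-up example, and the returned `corpus_id` values index into the `passages` list built above:

```python
import pandas as pd
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import semantic_search

# Reload the exported embeddings; rows align with the passages list above
corpus_embeddings = torch.tensor(
    pd.read_csv('simple_english_wikipedia_2020_11_01.csv').values,
    dtype=torch.float32,
)

bi_encoder = SentenceTransformer('nq-distilbert-base-v1')
query_embedding = bi_encoder.encode('What is the capital of Australia?', convert_to_tensor=True)

# The corpus embeddings are normalized, so the default cosine-similarity
# score behaves the same as a dot product here
hits = semantic_search(query_embedding, corpus_embeddings, top_k=5)[0]
for hit in hits:
    print(hit['corpus_id'], hit['score'])
```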