---
license: apache-2.0
language:
- en
---
For research use only.
## The conversion process
```python
# Set up the environment
import os

os.environ['DATASET_URL'] = 'http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz'
os.environ['DATASET_NAME'] = 'simplewiki-2020-11-01.jsonl.gz'  # local filename for the download
os.environ['MODEL_NAME'] = 'multi-qa-MiniLM-L6-cos-v1'
# Loading the dataset
import json
import gzip
from sentence_transformers.util import http_get
http_get(os.getenv('DATASET_URL'), os.getenv('DATASET_NAME'))
passages = []
with gzip.open(os.getenv('DATASET_NAME'), 'rt', encoding='utf8') as fIn:
    for line in fIn:
        data = json.loads(line.strip())
        # Only add the first paragraph of each article
        passages.append(data['paragraphs'][0])
        # Alternatively, add all paragraphs:
        # passages.extend(data['paragraphs'])
        # Or encode each passage as [title, text]:
        # for paragraph in data['paragraphs']:
        #     passages.append([data['title'], paragraph])
print(len(passages))
# Load the bi-encoder model
from sentence_transformers import SentenceTransformer

bi_encoder = SentenceTransformer(os.getenv('MODEL_NAME'))
bi_encoder.max_seq_length = 256
bi_encoder.to('cuda')
print(bi_encoder)
# Normalize the embeddings so that dot product equals cosine similarity
from sentence_transformers.util import normalize_embeddings

corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True).to('cuda')
corpus_embeddings = normalize_embeddings(corpus_embeddings)
print(len(corpus_embeddings))
# Save the embeddings to a CSV file
import pandas as pd

embeddings_data = pd.DataFrame(corpus_embeddings.cpu().numpy())
embeddings_data.to_csv('simple_english_wikipedia.csv', index=False)
``` |
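
To sanity-check the exported file, the embeddings can be read back from the CSV and used for retrieval. The sketch below is illustrative rather than part of the conversion pipeline above: the query string and `top_k` value are assumptions, and it uses `util.semantic_search` from sentence-transformers with its default cosine-similarity scoring.
```python
import pandas as pd
import torch
from sentence_transformers import SentenceTransformer, util

# Load the saved embeddings and convert them back to a tensor
embeddings_data = pd.read_csv('simple_english_wikipedia.csv')
corpus_embeddings = torch.tensor(embeddings_data.values, dtype=torch.float32)

# Encode a query with the same bi-encoder used for the corpus
bi_encoder = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')
query_embedding = bi_encoder.encode('What is the capital of France?', convert_to_tensor=True)

# Retrieve the top matches; hit['corpus_id'] indexes into the passages list
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)[0]
for hit in hits:
    print(hit['corpus_id'], round(hit['score'], 4))
```
Note that the CSV only stores the vectors, so the `passages` list (or an equivalent ID-to-text mapping) must be rebuilt from the original dataset to display the retrieved texts.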