---
license: apache-2.0
language:
  - en
---

For research use only.

The conversion process used to generate the embeddings is shown below.


```python
# Setting the env
import os

os.environ['DATASET_URL'] = 'http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz'
os.environ['DATASET_NAME'] = 'simplewiki-2020-11-01.jsonl.gz'  # local path the dataset is downloaded to
os.environ['MODEL_NAME'] = 'multi-qa-MiniLM-L6-cos-v1'
```


```python
# Loading the dataset
import json
import gzip

from sentence_transformers.util import http_get

# Download the dataset to the local file
http_get(os.getenv('DATASET_URL'), os.getenv('DATASET_NAME'))

passages = []
with gzip.open(os.getenv('DATASET_NAME'), 'rt', encoding='utf8') as fIn:
    for line in fIn:
        data = json.loads(line.strip())

        # Only add the first paragraph of each article
        passages.append(data['paragraphs'][0])

        # Alternatively, add all paragraphs:
        #     passages.extend(data['paragraphs'])
        # or encode the passages as [title, text] pairs:
        #     for paragraph in data['paragraphs']:
        #         passages.append([data['title'], paragraph])

len(passages)
```
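
Each line of the dataset file is a JSON object with a `title` and a list of `paragraphs`, which is what the loop above relies on. A quick way to inspect one record (a minimal sketch reusing the environment variables set earlier):

```python
import gzip
import json

# Peek at the first record to confirm the fields used above
with gzip.open(os.getenv('DATASET_NAME'), 'rt', encoding='utf8') as fIn:
    record = json.loads(next(fIn))

print(record['title'])
print(record['paragraphs'][0])
```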


```python
# Loading the model
from sentence_transformers import SentenceTransformer

bi_encoder = SentenceTransformer(os.getenv('MODEL_NAME'))
bi_encoder.max_seq_length = 256  # truncate passages longer than 256 tokens
bi_encoder.to('cuda')
bi_encoder  # display the model architecture (notebook-style inspection)
```
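
As a quick sanity check, multi-qa-MiniLM-L6-cos-v1 produces 384-dimensional sentence embeddings; the sample sentence below is only illustrative:

```python
# Encode a single sentence and inspect the embedding size
sample_embedding = bi_encoder.encode('Sydney is a city in Australia.', convert_to_tensor=True)
print(sample_embedding.shape)  # expected: torch.Size([384])
```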


```python
# Normalizing the embeddings
from sentence_transformers.util import normalize_embeddings

corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True).to('cuda')
# L2-normalize each embedding so that dot-product scores equal cosine similarity
corpus_embeddings = normalize_embeddings(corpus_embeddings)
len(corpus_embeddings)
```
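
Because the embeddings are L2-normalized, every vector has unit length and the dot product between two embeddings equals their cosine similarity. A small check (a sketch, assuming `corpus_embeddings` from the cell above is in scope):

```python
import torch

# Every normalized embedding should have unit L2 norm
norms = corpus_embeddings.norm(p=2, dim=1)
assert torch.allclose(norms, torch.ones_like(norms), atol=1e-5)

# For unit vectors, the dot product equals the cosine similarity
a, b = corpus_embeddings[0], corpus_embeddings[1]
dot = (a @ b).item()
cos = torch.nn.functional.cosine_similarity(a, b, dim=0).item()
assert abs(dot - cos) < 1e-5
```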


```python
# Save the embeddings to a CSV file
import pandas as pd

embeddings_data = pd.DataFrame(corpus_embeddings.cpu().numpy())
embeddings_data.to_csv('simple_english_wikipedia.csv', index=False)
```
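
To use the saved embeddings for retrieval, load the CSV back into a tensor and search it with a normalized query embedding. The sketch below uses `sentence_transformers.util.semantic_search` with dot-product scoring (valid here because the corpus embeddings are normalized); the query string and `top_k` value are illustrative:

```python
import os

import pandas as pd
import torch
from sentence_transformers import SentenceTransformer, util

# Load the saved embeddings back into a float tensor
embeddings = torch.tensor(pd.read_csv('simple_english_wikipedia.csv').values, dtype=torch.float32)

# Encode and normalize the query with the same bi-encoder
bi_encoder = SentenceTransformer(os.getenv('MODEL_NAME'))
query_embedding = bi_encoder.encode('What is the capital of Australia?', convert_to_tensor=True)
query_embedding = util.normalize_embeddings(query_embedding.unsqueeze(0))

# Retrieve the 5 most similar passages; dot score == cosine similarity here
hits = util.semantic_search(query_embedding, embeddings, top_k=5, score_function=util.dot_score)[0]
for hit in hits:
    print(hit['corpus_id'], hit['score'])
```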