---
dataset_info:
  features:
  - name: id
    dtype: string
  - name: url
    dtype: string
  - name: title
    dtype: string
  - name: chunks
    sequence: string
  - name: embeddings
    sequence:
      sequence: float32
  splits:
  - name: train
    num_bytes: 2580729273
    num_examples: 534044
  download_size: 2307703671
  dataset_size: 2580729273
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
language:
- cs
size_categories:
- 100K<n<1M
---

## Load the data in Elasticsearch

```python
from elasticsearch import Elasticsearch
from elasticsearch.helpers import parallel_bulk
from tqdm import tqdm

# Assumes a running Elasticsearch instance and `ds` loaded as the
# train split of this dataset with `datasets.load_dataset`.
es = Elasticsearch("http://localhost:9200")

def doc_generator(data, batch_size=1000):
    for batch in data.with_format("numpy").iter(batch_size):
        for i, id in enumerate(batch["id"]):
            output = {"id": id}
            output["title"] = batch["title"][i]
            output["url"] = batch["url"][i]
            output["parts"] = [
                {"chunk": chunk, "embedding": embedding}
                for chunk, embedding in zip(batch["chunks"][i], batch["embeddings"][i])
            ]
            yield output

num_indexed, num_failed = 0, 0
progress = tqdm(total=ds.num_rows, unit="doc", desc="Indexing")

for ok, info in parallel_bulk(
    es,
    index="wikipedia-search",
    actions=doc_generator(ds),
    raise_on_error=False,
):
    if ok:
        num_indexed += 1
    else:
        num_failed += 1
        print(f"ERROR {info['index']['status']}: {info['index']['error']}")
    progress.update(1)
```
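The snippet above assumes the `wikipedia-search` index already exists with a mapping that stores the per-chunk embeddings as dense vectors. A minimal sketch of such a mapping follows; the nested layout mirrors the documents produced by `doc_generator`, but the exact settings (and the 768 dimensions of the MPNet-based model) are an assumption, not part of this card:

```python
# Hypothetical index mapping: `parts` is a nested field so each chunk
# keeps its own text and embedding vector.
es.indices.create(
    index="wikipedia-search",
    mappings={
        "properties": {
            "id": {"type": "keyword"},
            "title": {"type": "text"},
            "url": {"type": "keyword"},
            "parts": {
                "type": "nested",
                "properties": {
                    "chunk": {"type": "text"},
                    "embedding": {
                        "type": "dense_vector",
                        "dims": 768,
                        "index": True,
                        "similarity": "cosine",
                    },
                },
            },
        }
    },
)
```

With a mapping like this, recent Elasticsearch versions can serve approximate nearest-neighbor queries against `parts.embedding` in addition to full-text search over `parts.chunk`.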
## Use sentence_transformers.util.semantic_search

```python
import os
import textwrap

import sentence_transformers
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, Transformer

embedding_model = Transformer("Seznam/simcse-dist-mpnet-paracrawl-cs-en")
pooling = Pooling(
    word_embedding_dimension=embedding_model.get_word_embedding_dimension(),
    pooling_mode="cls",
)
model = SentenceTransformer(modules=[embedding_model, pooling])

ds.set_format(type="torch", columns=["embeddings"], output_all_columns=True)

# Flatten the dataset: one row per (chunk, embedding) pair
def explode_sequence(batch):
    output = {"id": [], "url": [], "title": [], "chunk": [], "embedding": []}
    for id, url, title, chunks, embeddings in zip(
        batch["id"], batch["url"], batch["title"], batch["chunks"], batch["embeddings"]
    ):
        output["id"].extend([id] * len(chunks))
        output["url"].extend([url] * len(chunks))
        output["title"].extend([title] * len(chunks))
        output["chunk"].extend(chunks)
        output["embedding"].extend(embeddings)
    return output

ds_flat = ds.map(
    explode_sequence,
    batched=True,
    remove_columns=ds.column_names,
    num_proc=min(os.cpu_count(), 32),
    desc="Flatten",
)

query = "Čím se zabývá fyzika?"

hits = sentence_transformers.util.semantic_search(
    query_embeddings=model.encode(query),
    corpus_embeddings=ds_flat["embedding"],
    top_k=10,
)

for hit in hits[0]:
    title = ds_flat[hit["corpus_id"]]["title"]
    chunk = ds_flat[hit["corpus_id"]]["chunk"]
    print(f"[{hit['score']:0.2f}] {textwrap.shorten(chunk, width=100, placeholder='…')} [{title}]")

# [0.72] Molekulová fyzika ( též molekulární fyzika ) je část fyziky, která zkoumá látky na úrovni atomů a… [Molekulová fyzika]
# [0.70] Fyzika ( z řeckého φυσικός ( fysikos ): přírodní, ze základu φύσις ( fysis ): příroda, archaicky… [Fyzika]
# ...
```
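For reference, the `embeddings` column can be reproduced by encoding each article's chunks with the same `Seznam/simcse-dist-mpnet-paracrawl-cs-en` model built above. The sketch below is a hypothetical reconstruction, not the exact script used; the batch sizes and the `map` wiring are assumptions:

```python
# Hypothetical reproduction of the `embeddings` column: encode each
# article's chunks with the model defined above.
def embed_chunks(batch):
    batch["embeddings"] = [
        model.encode(chunks, batch_size=64) for chunks in batch["chunks"]
    ]
    return batch

ds_with_embeddings = ds.map(embed_chunks, batched=True, batch_size=8)
```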
The embedding generation took about 35 minutes on an NVIDIA A100 80GB.

## License

See the license of the original dataset.