---
language:
- cs
license:
- cc-by-sa-3.0
- gfdl
size_categories:
- 100K<n<1M
---

## Load the data in Elasticsearch

```python
from elasticsearch.helpers import parallel_bulk
from tqdm import tqdm

def doc_generator(data, batch_size=1000):
    # Yield one document per article, with its chunks and embeddings
    # nested under "parts".
    for batch in data.with_format("numpy").iter(batch_size):
        for i, id in enumerate(batch["id"]):
            output = {"id": id}
            output["title"] = batch["title"][i]
            output["url"] = batch["url"][i]
            output["parts"] = [
                {
                    "chunk": chunk,
                    "embedding": embedding,
                }
                for chunk, embedding in zip(batch["chunks"][i], batch["embeddings"][i])
            ]
            yield output

num_indexed, num_failed = 0, 0
progress = tqdm(total=ds.num_rows, unit="doc", desc="Indexing")

# "es" is a connected Elasticsearch client; "ds" is the loaded dataset.
for ok, info in parallel_bulk(
    es,
    index="wikipedia-search",
    actions=doc_generator(ds),
    raise_on_error=False,
):
    if not ok:
        num_failed += 1
        print(f"ERROR {info['index']['status']}: {info['index']['error']}")
    else:
        num_indexed += 1
    progress.update(1)
```
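The bulk-indexing loop above assumes the `wikipedia-search` index already exists and that `es` is a connected client. A minimal sketch of how such an index might be created with elasticsearch-py 8.x; the nested mapping and the 768-dimension `dense_vector` are assumptions (768 matches mpnet-based models), so adjust them to your cluster and the dataset's actual embedding size:

```python
from elasticsearch import Elasticsearch

# Hypothetical local client; adjust the URL and authentication for your cluster.
es = Elasticsearch("http://localhost:9200")

es.indices.create(
    index="wikipedia-search",
    mappings={
        "properties": {
            "title": {"type": "text"},
            "url": {"type": "keyword"},
            "parts": {
                # Nested objects keep each chunk paired with its embedding.
                "type": "nested",
                "properties": {
                    "chunk": {"type": "text"},
                    "embedding": {
                        "type": "dense_vector",
                        "dims": 768,  # assumption: adjust to the embedding dimension
                        "index": True,
                        "similarity": "cosine",
                    },
                },
            },
        }
    },
)
```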
## Use sentence_transformers.util.semantic_search

```python
import os
import textwrap

import sentence_transformers

model = sentence_transformers.SentenceTransformer("Seznam/simcse-dist-mpnet-paracrawl-cs-en")

ds.set_format(type="torch", columns=["embeddings"], output_all_columns=True)

# Flatten the dataset: one row per chunk instead of one row per article
def explode_sequence(batch):
    output = {"id": [], "url": [], "title": [], "chunk": [], "embedding": []}
    for id, url, title, chunks, embeddings in zip(
        batch["id"], batch["url"], batch["title"], batch["chunks"], batch["embeddings"]
    ):
        output["id"].extend([id for _ in range(len(chunks))])
        output["url"].extend([url for _ in range(len(chunks))])
        output["title"].extend([title for _ in range(len(chunks))])
        output["chunk"].extend(chunks)
        output["embedding"].extend(embeddings)
    return output

ds_flat = ds.map(
    explode_sequence,
    batched=True,
    remove_columns=ds.column_names,
    num_proc=min(os.cpu_count(), 32),
    desc="Flatten")
ds_flat

query = "Čím se zabývá fyzika?"  # "What does physics deal with?"

hits = sentence_transformers.util.semantic_search(
    query_embeddings=model.encode(query),
    corpus_embeddings=ds_flat["embedding"],
    top_k=10)

for hit in hits[0]:
    title = ds_flat[hit['corpus_id']]['title']
    chunk = ds_flat[hit['corpus_id']]['chunk']
    print(f"[{hit['score']:0.2f}] {textwrap.shorten(chunk, width=100, placeholder='…')} [{title}]")

# [0.58] Dynamika Fyzikální zákony [Newtonovy pohybové zákony]
# [0.53] Teorie množin [Ordinální číslo]
# ...
```
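For repeated queries, it can be faster to stack the flattened embeddings into a single tensor once, and optionally keep it on the GPU, instead of passing the dataset column to every call. A sketch under the assumption that all embeddings fit in memory:

```python
import torch

# One-time: stack the per-chunk embeddings into a (num_chunks, dim) tensor.
# torch.as_tensor handles both tensor and plain-list rows.
corpus_embeddings = torch.stack([torch.as_tensor(e) for e in ds_flat["embedding"]])

device = "cuda" if torch.cuda.is_available() else "cpu"
corpus_embeddings = corpus_embeddings.to(device)

query_embedding = model.encode(query, convert_to_tensor=True).to(device)

hits = sentence_transformers.util.semantic_search(
    query_embeddings=query_embedding,
    corpus_embeddings=corpus_embeddings,
    top_k=10,
)
```

`semantic_search` computes cosine similarities in chunks, so the one-time stacking (and GPU placement) mainly pays off when many queries are run against the same corpus.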
The embeddings generation took about 25 minutes on an NVIDIA RTX 4090 24GB.

## License

See the license of the original dataset: .