import os
import pandas as pd
from pathlib import Path

import retriv

retriv.set_base_path("./retriv_wiki_de")  # directory where retriv stores its indexes
from retriv import DenseRetriever
"""
Uncomment if you wanna make your own index
dr = DenseRetriever(
index_name="wiki_de-index_sentence_transf-BAAI/bge-m3_title_only_fullarticles",
model="BAAI/bge-m3",
normalize=True,
max_length=512,
use_ann=True,
)
dr = dr.index_file(
path="./wikipedia_de_filtered_fullarticles.csv", # File kind is automatically inferred
embeddings_path=None, # Default value
use_gpu=True, # Default value
batch_size=32, # Default value
show_progress=True, # Default value
callback=lambda doc: { # Callback defaults to None.
"id": doc["id"],
"text": doc["title"],
},
)
"""
# Load the German Wikipedia text data
file_path = "./wikipedia_de_filtered_fullarticles.csv"  # CSV with the full article texts
df = pd.read_csv(file_path)

file_path = "./wikipedia_de_filtered_300wordchunks.csv"  # CSV with 300-word article chunks
df2 = pd.read_csv(file_path)
# Load the retrievers.
# The embeddings in this index were computed from the titles of the Wikipedia
# pages, but the returned IDs can be matched to the full texts in
# wikipedia_de_filtered_fullarticles.csv.
dr = DenseRetriever.load("wiki_de-index_sentence_transf-BAAI/bge-m3_title_only_fullarticles")
result = dr.search(
    query="was ist der doppelspaltversuch?",  # what to search for ("what is the double-slit experiment?")
    return_docs=True,                         # default value; return the documents, not just IDs and scores
    cutoff=3,                                 # number of results to return
)
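# With return_docs=True, each hit is a dict roughly of the form
# {"id": ..., "text": ..., "score": ...} (per the retriv documentation; the
# exact field set is an assumption, not shown on this card).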
# print(df)  # debug: inspect the loaded articles

for res in result:
    id_query = int(res["id"]) - 1  # the "id" values are 1-based, so subtract 1
    row = df.iloc[id_query]
    # Extract 'text' and 'url' from the matching row
    result_text = row["text"]
    result_url = row["url"]
    print(result_url, result_text[:1000])
    print("###################")

print("+++++++++++++++++++")
# The embeddings in this index were computed from 300-word segments of the
# articles. The IDs point to wikipedia_de_filtered_300wordchunks.csv.
dr2 = DenseRetriever.load("wiki_de-index_sentence_transf-BAAI/bge-m3")
result = dr2.search(
    query="was ist der doppelspaltversuch?",  # what to search for
    return_docs=True,                         # default value; return the documents
    cutoff=3,                                 # number of results to return
)
for res in result:
    id_query = int(res["id"]) - 1  # the "id" values are 1-based, so subtract 1
    row = df2.iloc[id_query]
    # Extract 'text' and 'url' from the matching row
    result_text = row["text"]
    result_url = row["url"]
    print(result_url, result_text)
    print("########")