import os
import pandas as pd
from pathlib import Path


import retriv
retriv.set_base_path("./retriv_wiki_de")
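# From here on retriv stores and looks up its indexes under ./retriv_wiki_de.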


from retriv import DenseRetriever


"""
# Uncomment if you wanna make your own index
dr = DenseRetriever(
  index_name="wiki_de-index_sentence_transf-BAAI/bge-m3_title_only_fullarticles",
  model="BAAI/bge-m3",  
  normalize=True,
  max_length=512,
  use_ann=True,
)


dr = dr.index_file(
  path="./wikipedia_de_filtered_fullarticles.csv",  # File kind is automatically inferred
  embeddings_path=None,       # Default value
  use_gpu=True,               # Encode on GPU
  batch_size=32,              # Encoding batch size
  show_progress=True,         # Default value
  callback=lambda doc: {      # Callback defaults to None.
    "id": doc["id"],
    "text": doc["title"],          
  },
)
"""


# Load the German Wikipedia text data
file_path = "./wikipedia_de_filtered_fullarticles.csv"    # CSV with the full articles
df = pd.read_csv(file_path)

file_path = "./wikipedia_de_filtered_300wordchunks.csv"   # CSV with 300-word chunks
df2 = pd.read_csv(file_path)


# Load the retrievers.
# This index was built from the Wikipedia page titles only; the returned ids
# can be mapped back to the full texts in wikipedia_de_filtered_fullarticles.csv.
dr = DenseRetriever.load("wiki_de-index_sentence_transf-BAAI/bge-m3_title_only_fullarticles")


result = dr.search(
  query="was ist der doppelspaltversuch?",  # "What is the double-slit experiment?"
  return_docs=True,                         # Default value; return the text of the documents
  cutoff=3,                                 # Number of results to return
)
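# Each entry in `result` is a dict; with return_docs=True it typically holds the
# document "id", the indexed "text", and a similarity "score".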
print(df)

for res in result:

  id_query = int(res["id"]) - 1   # the "id" values start at 1, not 0, so subtract 1
  row = df.iloc[id_query]

  print(row)
  # Extract 'text' and 'url' from the matching row
  result_text = row['text']
  result_url = row['url']
  print(result_url, result_text[:1000])



print("###################")
print("+++++++++++++++++++")



# This index was built from 300-word segments of the articles; the returned ids
# map back to wikipedia_de_filtered_300wordchunks.csv.
dr2 = DenseRetriever.load("wiki_de-index_sentence_transf-BAAI/bge-m3")



result = dr2.search(
  query="was ist der doppelspaltversuch?",  # "What is the double-slit experiment?"
  return_docs=True,                         # Default value; return the text of the documents
  cutoff=3,                                 # Number of results to return
)


for res in result:

  id_query = int(res["id"]) - 1   # the "id" values start at 1, not 0, so subtract 1
  row = df2.iloc[id_query]

  print(row)
  # Extract 'text' and 'url' from the matching row
  result_text = row['text']
  result_url = row['url']
  print(result_url, result_text)


print("########")