ChristophSchuhmann committed
Commit
08d4748
1 Parent(s): c2114d5

Create README.md

Files changed (1): README.md (+99 lines)

README.md (new file):
import os
import pandas as pd
from pathlib import Path


import retriv
retriv.set_base_path("./retriv_wiki_de")


from retriv import DenseRetriever


"""
# Uncomment this block if you want to build your own index
dr = DenseRetriever(
    index_name="wiki_de-index_sentence_transf-BAAI/bge-m3_title_only_fullarticles",
    model="BAAI/bge-m3",
    normalize=True,
    max_length=512,
    use_ann=True,
)

dr = dr.index_file(
    path="./wikipedia_de_filtered_fullarticles.csv",  # File kind is automatically inferred
    embeddings_path=None,    # Default value
    use_gpu=True,            # Compute the embeddings on GPU
    batch_size=32,
    show_progress=True,      # Default value
    callback=lambda doc: {   # Callback defaults to None
        "id": doc["id"],
        "text": doc["title"],
    },
)
"""

# Loading the German Wikipedia text data
file_path = "./wikipedia_de_filtered_fullarticles.csv"  # CSV with the full article texts
df = pd.read_csv(file_path)

file_path = "./wikipedia_de_filtered_300wordchunks.csv"  # CSV with 300-word chunks of the articles
df2 = pd.read_csv(file_path)


# Loading the retrievers.
# The embeddings of this index are built from the titles of the Wikipedia pages only,
# but the returned IDs can be matched to the full texts in wikipedia_de_filtered_fullarticles.csv.
dr = DenseRetriever.load("wiki_de-index_sentence_transf-BAAI/bge-m3_title_only_fullarticles")


result = dr.search(
    query="was ist der doppelspaltversuch?",  # What to search for ("What is the double-slit experiment?")
    return_docs=True,  # Default value, return the text of the documents
    cutoff=3,  # Number of results to return
)
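# With return_docs=True, the search result is expected to be a list of dicts, each
# holding the document "id", the indexed "text" (here: the page title), and a
# similarity score, roughly like (illustrative values only):
#   [{"id": "17", "text": "Doppelspaltexperiment", "score": 0.83}, ...]
# The "id" is what gets mapped back to the CSV rows below.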
print(df)

for res in result:
    id_query = int(res["id"]) - 1  # the "id" values start at 1, not 0, so subtract 1
    row = df.iloc[id_query]

    print(row)
    # Extracting 'text' and 'url' from the matching row
    result_text = row['text']
    result_url = row['url']
    print(result_url, result_text[:1000])


print("###################")
print("+++++++++++++++++++")


# The embeddings of this index are built from 300-word segments of the articles.
# The returned IDs point to rows of wikipedia_de_filtered_300wordchunks.csv.
dr2 = DenseRetriever.load("wiki_de-index_sentence_transf-BAAI/bge-m3")


result = dr2.search(
    query="was ist der doppelspaltversuch?",  # What to search for
    return_docs=True,  # Default value, return the text of the documents
    cutoff=3,  # Number of results to return
)


for res in result:
    id_query = int(res["id"]) - 1  # the "id" values start at 1, not 0, so subtract 1
    row = df2.iloc[id_query]

    print(row)
    # Extracting 'text' and 'url' from the matching row
    result_text = row['text']
    result_url = row['url']
    print(result_url, result_text)


print("########")