Modalities: Text · Formats: parquet · Languages: French
mciancone committed
Commit d52136f · 1 Parent(s): 65393d0

Upload build_reranking_dataset_BM25.py

Files changed (1):
  1. build_reranking_dataset_BM25.py +176 -0
build_reranking_dataset_BM25.py ADDED
@@ -0,0 +1,176 @@
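"""Build a reranking dataset for Alloprof by mining BM25 hard negatives.

The script tunes BM25+ hyperparameters with a grid search (using MRR as the
selection metric), then uses the best estimator to collect, for each query,
top-ranked documents that are not labeled relevant, and pushes the resulting
dataset to the Hugging Face Hub.
"""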
from rank_bm25 import BM25Plus
import datasets
from sklearn.base import BaseEstimator
from sklearn.model_selection import GridSearchCV

from huggingface_hub import create_repo
from huggingface_hub.utils import HfHubHTTPError


N_NEGATIVE_DOCS = 10
SPLIT = "test"

# Prepare documents
def create_text(example: dict) -> str:
    """Concatenate a document's title and body into a single text field."""
    return "\n".join([example["title"], example["text"]])

documents = datasets.load_dataset("lyon-nlp/alloprof", "documents")["test"]
documents = documents.map(lambda x: {"text": create_text(x)})
documents = documents.rename_column("uuid", "doc_id")
documents = documents.remove_columns(["__index_level_0__", "title", "topic"])

# Prepare queries
queries = datasets.load_dataset("lyon-nlp/alloprof", "queries")[SPLIT]
queries = queries.rename_columns({"text": "queries", "relevant": "doc_id"})
queries = queries.remove_columns(["__index_level_0__", "answer", "id", "subject"])

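# At this point `documents` has columns ["doc_id", "text"] and `queries`
# has columns ["queries", "doc_id"], where "doc_id" holds the list of
# relevant document ids for each query.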
# Optimize BM25 parameters
### Build a sklearn-compatible estimator wrapping BM25
class BM25Estimator(BaseEstimator):

    def __init__(self, corpus_dataset: datasets.Dataset, *, k1: float = 1.5, b: float = 0.75, delta: int = 1):
        """Initialize the BM25 estimator using the corpus dataset.
        The dataset must contain 2 columns:
        - "doc_id": the document ids
        - "text": the document texts

        Args:
            corpus_dataset (datasets.Dataset): the corpus of documents to rank
            k1 (float, optional): term-frequency saturation parameter. Defaults to 1.5.
            b (float, optional): document-length normalization parameter. Defaults to 0.75.
            delta (int, optional): BM25+ lower-bounding bonus. Defaults to 1.
        """
        self.is_fitted_ = False

        self.corpus_dataset = corpus_dataset
        self.k1 = k1
        self.b = b
        self.delta = delta
        self.bm25 = None

    def tokenize_corpus(self, corpus: list[str]) -> list[list[str]]:
        """Tokenize a corpus of strings by lowercasing and whitespace splitting.
        A single string is also accepted, in which case a single token list
        is returned.

        Args:
            corpus (list[str]): the list of strings to tokenize

        Returns:
            list[list[str]]: the tokenized corpus
        """
        if isinstance(corpus, str):
            return corpus.lower().split()

        return [c.lower().split() for c in corpus]

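    # Illustration of the naive tokenizer above (hypothetical input):
    #   tokenize_corpus("Les règles de GRAMMAIRE") -> ["les", "règles", "de", "grammaire"]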
    def fit(self, X=None, y=None):
        """Fit the BM25 index on the dataset of documents.
        The args are placeholders required by the sklearn API and are ignored.
        """
        tokenized_corpus = self.tokenize_corpus(self.corpus_dataset["text"])
        self.bm25 = BM25Plus(
            corpus=tokenized_corpus,
            k1=self.k1,
            b=self.b,
            delta=self.delta,
        )
        self.is_fitted_ = True

        return self

    def predict(self, query: str, topN: int = 10) -> list[str]:
        """Return the ids of the best documents, most relevant first.

        Args:
            query (str): the query to rank the documents against
            topN (int, optional): the number of document ids to return. Defaults to 10.

        Returns:
            list[str]: the ids of the topN best-ranked documents
        """
        if not self.is_fitted_:
            self.fit()

        tokenized_query = self.tokenize_corpus(query)
        best_docs = self.bm25.get_top_n(tokenized_query, self.corpus_dataset["text"], n=topN)
        # Map the returned document texts back to their ids
        doc_text2id = dict(zip(self.corpus_dataset["text"], self.corpus_dataset["doc_id"]))
        best_docs_ids = [doc_text2id[doc] for doc in best_docs]

        return best_docs_ids

    def score(self, queries: list[str], relevant_docs: list[list[str]]) -> float:
        """Score the BM25 model on the queries and relevant docs,
        using MRR as the metric.

        Args:
            queries (list[str]): list of queries
            relevant_docs (list[list[str]]): list of relevant document ids for each query

        Returns:
            float: the mean MRR over all queries
        """
        best_docs_ids_preds = [self.predict(q, N_NEGATIVE_DOCS) for q in queries]
        best_docs_isrelevant = [
            [doc in rel_docs for doc in best_docs_ids_pred]
            for best_docs_ids_pred, rel_docs in zip(best_docs_ids_preds, relevant_docs)
        ]
        mrrs = [self._compute_mrr(preds) for preds in best_docs_isrelevant]
        mrr = sum(mrrs) / len(mrrs)

        return mrr

    def _compute_mrr(self, predictions: list[bool]) -> float:
        """Compute a reciprocal-rank score from a list of boolean predictions.
        Example:
            if predictions = [False, False, True, False], only the third
            document was relevant to the query, so the score is 1/3.
        When several documents are relevant, this averages the reciprocal
        ranks of all of them rather than keeping only the first.

        Args:
            predictions (list[bool]): the binarized relevancy of predictions

        Returns:
            float: the (averaged) reciprocal rank
        """
        if any(predictions):
            reciprocal_ranks = [1 / (i + 1) for i, pred in enumerate(predictions) if pred]
            return sum(reciprocal_ranks) / len(reciprocal_ranks)

        return 0

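# A minimal standalone usage sketch (commented out; the query string is a
# hypothetical example, everything else is defined above):
#   bm25 = BM25Estimator(documents).fit()
#   bm25.predict("Comment conjuguer le verbe aller ?", topN=5)
#   bm25.score(queries["queries"][:10], queries["doc_id"][:10])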
### Perform a grid search to find the best parameters for BM25
print("Optimizing BM25 parameters...")

params = {
    "k1": [1.25, 1.5, 1.75],
    "b": [.5, .75, 1.],
    "delta": [0, 1],
}

gscv = GridSearchCV(BM25Estimator(documents), params, verbose=1)
gscv.fit(queries["queries"], queries["doc_id"])

print("Best parameters :", gscv.best_params_)
print("Best MRR score :", gscv.best_score_)

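# Note: the CV folds only determine which queries each candidate is scored
# on; fit() ignores X and y and always indexes the full corpus. With the
# default refit=True, gscv.best_estimator_ is refitted and ready to use below.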
# Build the reranking dataset with positive and negative documents using the best estimator
print("Generating reranking dataset...")
reranking_dataset = datasets.Dataset.from_dict(
    {
        "query": queries["queries"],
        "positive": queries["doc_id"],
        "negative": [
            # Hard negatives: top-ranked BM25 hits that are not labeled relevant
            [doc_id for doc_id in gscv.best_estimator_.predict(q, N_NEGATIVE_DOCS) if doc_id not in relevant_ids]
            for q, relevant_ids in zip(queries["queries"], queries["doc_id"])
        ],
    }
)

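# Note: a query can end up with fewer than N_NEGATIVE_DOCS negatives when
# some of its top-ranked BM25 hits are labeled relevant.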
# Push dataset to hub
### create HF repo
repo_id = "lyon-nlp/mteb-fr-reranking-alloprof-s2p"
try:
    create_repo(repo_id, repo_type="dataset")
except HfHubHTTPError:
    print("HF repo already exists")

### push to hub
reranking_dataset.push_to_hub(repo_id, config_name="queries", split=SPLIT)
documents.push_to_hub(repo_id, config_name="documents", split="test")
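
# The pushed configurations can later be reloaded with, e.g.:
#   datasets.load_dataset("lyon-nlp/mteb-fr-reranking-alloprof-s2p", "queries", split="test")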