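"""Build the alloprof reranking dataset.

Tunes BM25 retrieval parameters with a grid search (scored by MRR),
then mines hard negatives for each query and pushes the resulting
dataset to the Hugging Face Hub.
"""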
from rank_bm25 import BM25Plus
import datasets
from sklearn.base import BaseEstimator
from sklearn.model_selection import GridSearchCV
from huggingface_hub import create_repo
from huggingface_hub.utils import HfHubHTTPError
N_NEGATIVE_DOCS = 10  # documents retrieved per query, for scoring and negative mining
SPLIT = "test"  # queries split to process
# Prepare documents
def create_text(example: dict) -> str:
    """Concatenate a document's title and body into a single text field."""
    return "\n".join([example["title"], example["text"]])
documents = datasets.load_dataset("lyon-nlp/alloprof", "documents")["test"]
documents = documents.map(lambda x: {"text": create_text(x)})
documents = documents.rename_column("uuid", "doc_id")
documents = documents.remove_columns(["__index_level_0__", "title", "topic"])
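# documents now contains exactly two columns: "doc_id" and "text"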
# Prepare queries
queries = datasets.load_dataset("lyon-nlp/alloprof", "queries")[SPLIT]
queries = queries.rename_columns({"text": "queries", "relevant": "doc_id"})
queries = queries.remove_columns(["__index_level_0__", "answer", "id", "subject"])
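# queries now maps each query text ("queries") to its list of relevant document ids ("doc_id")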
# Optimize BM25 parameters
### Build a sklearn estimator wrapping BM25
class BM25Estimator(BaseEstimator):
    def __init__(self, corpus_dataset: datasets.Dataset, *, k1: float = 1.5, b: float = .75, delta: int = 1):
        """Initialize the BM25 estimator from the corpus dataset.
        The dataset must contain 2 columns:
            - "doc_id": the document ids
            - "text": the document texts
        Args:
            corpus_dataset (datasets.Dataset): the corpus of documents to retrieve from
            k1 (float, optional): BM25 term-frequency saturation parameter. Defaults to 1.5.
            b (float, optional): BM25 document-length normalization parameter. Defaults to .75.
            delta (int, optional): BM25+ lower bound added to each term score. Defaults to 1.
        """
        self.is_fitted_ = False
        self.corpus_dataset = corpus_dataset
        self.k1 = k1
        self.b = b
        self.delta = delta
        self.bm25 = None
    def tokenize_corpus(self, corpus: str | list[str]) -> list[str] | list[list[str]]:
        """Tokenize a string or a corpus of strings by lowercasing and
        splitting on whitespace.
        Args:
            corpus (str | list[str]): the string or list of strings to tokenize
        Returns:
            list[str] | list[list[str]]: the tokenized string or corpus
        """
        if isinstance(corpus, str):
            return corpus.lower().split()
        return [c.lower().split() for c in corpus]
    def fit(self, X=None, y=None):
        """Fit the BM25 index on the corpus dataset.
        X and y are placeholders required by the sklearn API.
        """
        tokenized_corpus = self.tokenize_corpus(self.corpus_dataset["text"])
        self.bm25 = BM25Plus(
            corpus=tokenized_corpus,
            k1=self.k1,
            b=self.b,
            delta=self.delta,
        )
        self.is_fitted_ = True
        return self
    def predict(self, query: str, topN: int = 10) -> list[str]:
        """Return the ids of the topN documents, ordered most relevant first.
        Args:
            query (str): the query to run against the corpus
            topN (int, optional): number of document ids to return. Defaults to 10.
        Returns:
            list[str]: the ids of the topN best matching documents
        """
        if not self.is_fitted_:
            self.fit()
        tokenized_query = self.tokenize_corpus(query)
        best_docs = self.bm25.get_top_n(tokenized_query, self.corpus_dataset["text"], n=topN)
        # Map the retrieved texts back to their document ids
        doc_text2id = dict(zip(self.corpus_dataset["text"], self.corpus_dataset["doc_id"]))
        best_docs_ids = [doc_text2id[doc] for doc in best_docs]
        return best_docs_ids
    def score(self, queries: list[str], relevant_docs: list[list[str]]):
        """Score the BM25 estimator on the queries, using MRR as the metric.
        Args:
            queries (list[str]): list of queries
            relevant_docs (list[list[str]]): list of relevant document ids for each query
        """
        best_docs_ids_preds = [self.predict(q, N_NEGATIVE_DOCS) for q in queries]
        best_docs_isrelevant = [
            [doc in rel_docs for doc in best_docs_ids_pred]
            for best_docs_ids_pred, rel_docs in zip(best_docs_ids_preds, relevant_docs)
        ]
        mrrs = [self._compute_mrr(preds) for preds in best_docs_isrelevant]
        return sum(mrrs) / len(mrrs)
    def _compute_mrr(self, predictions: list[bool]) -> float:
        """Compute the MRR contribution of a list of boolean predictions,
        i.e. the reciprocal rank of the first relevant prediction.
        Example:
            predictions = [False, False, True, False] indicates that only
            the third retrieved document is relevant, giving a score of 1/3.
        Args:
            predictions (list[bool]): the binarized relevance of the predictions
        Returns:
            float: the reciprocal rank (0. when no prediction is relevant)
        """
        if any(predictions):
            # MRR uses the rank of the first relevant document (1-indexed)
            return 1 / (predictions.index(True) + 1)
        return 0.
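### Example usage of the estimator on its own (a sketch; the query string is hypothetical):
# estimator = BM25Estimator(documents).fit()
# estimator.predict("Comment calculer l'aire d'un triangle ?", topN=5)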
### Perform a grid search to find the best parameters for BM25
print("Optimizing BM25 parameters...")
params = {
    "k1": [1.25, 1.5, 1.75],
    "b": [.5, .75, 1.],
    "delta": [0, 1],
}
gscv = GridSearchCV(BM25Estimator(documents), params, verbose=1)
gscv.fit(queries["queries"], queries["doc_id"])
print("Best parameters:", gscv.best_params_)
print("Best MRR score:", gscv.best_score_)
# Build reranking dataset with positives and negative queries using best estimator
print("Generating reranking dataset...")
reranking_dataset = datasets.Dataset.from_dict(
    {
        "query": queries["queries"],
        "positive": queries["doc_id"],
        "negative": [
            # Use the refit best estimator, not an unfitted one with default params
            [doc_id for doc_id in gscv.best_estimator_.predict(q, N_NEGATIVE_DOCS) if doc_id not in relevant_ids]
            for q, relevant_ids in zip(queries["queries"], queries["doc_id"])
        ],
    }
)
# Push dataset to hub
### create HF repo
repo_id = "lyon-nlp/mteb-fr-reranking-alloprof-s2p"
try:
create_repo(repo_id, repo_type="dataset")
except HfHubHTTPError:
    print("HF repo already exists")
### push to hub
reranking_dataset.push_to_hub(repo_id, config_name="queries", split=SPLIT)
documents.push_to_hub(repo_id, config_name="documents", split="test")