mciancone committed on
Commit
0ecf942
1 Parent(s): b205c50

Upload create_data_reranking.py

Files changed (1)
  1. create_data_reranking.py +78 -0
create_data_reranking.py ADDED
@@ -0,0 +1,78 @@
+ import datasets
+ from sentence_transformers import SentenceTransformer, util
+ import torch
+ from huggingface_hub import create_repo
+ from huggingface_hub.utils import HfHubHTTPError
+
+ """
+ To create a reranking dataset from the initial retrieval dataset,
+ we use a model (sentence-transformers/all-MiniLM-L6-v2) to embed the queries and the documents.
+ We then compute the cosine similarity between each query and each document.
+ For each query, we retrieve the top-k documents, as we would for a retrieval task.
+ Each query-document pair is labeled as relevant if it was labeled as such in the retrieval dataset,
+ and as irrelevant otherwise.
+ """
+ # Download the documents (corpus)
+ corpus_raw = datasets.load_dataset("lyon-nlp/mteb-fr-retrieval-syntec-s2p", "documents")
+ # Download the queries
+ queries_raw = datasets.load_dataset("lyon-nlp/mteb-fr-retrieval-syntec-s2p", "queries")
+ # Load the embedding model
+ model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+
+ # Build the document text (title + content)
+ corpus = corpus_raw.map(lambda x: {"text": x["title"] + "\n\n" + x["content"]})
+ # Embed documents and queries
+ corpus = corpus.map(lambda x: {"embeddings": model.encode(x['text'])}, batched=True)
+ queries = queries_raw.map(lambda x: {"embeddings": model.encode(x["Question"])}, batched=True)
+
+ # Replace document names with integer ids (their index in the corpus)
+ doc_name_id_mapping = {doc["id"]: i for i, doc in enumerate(corpus["documents"])}
+ corpus = corpus.map(lambda x: {"doc_id": doc_name_id_mapping[x["id"]]})
+ queries = queries.map(lambda x: {"doc_id": doc_name_id_mapping[x["Article"]]})
+
+
+ # Retrieve the best documents for each query by cosine similarity
+ def retrieve_documents(queries_embs, documents_embs, topk: int = 10) -> torch.return_types.topk:
+     """Finds the topk documents for each embedded query among all the embedded documents
+
+     Args:
+         queries_embs: the embeddings of all queries of the dataset (dataset["queries"]["embeddings"])
+         documents_embs: the embeddings of the whole corpus of the dataset (dataset["corpus"]["embeddings"])
+         topk (int, optional): the number of top documents to retrieve. Defaults to 10.
+
+     Returns:
+         torch.return_types.topk: the topk object, with topk.values being the cosine similarities
+             and topk.indices being the indices of the best documents for each query
+     """
+     similarities = util.cos_sim(queries_embs, documents_embs)
+     tops = torch.topk(similarities, k=topk, dim=1)
+
+     return tops
+
+ top_docs = retrieve_documents(queries["queries"]["embeddings"], corpus["documents"]["embeddings"])
+ queries = queries.map(
+     lambda _, i: {"top_cosim_values": top_docs.values[i], "top_cosim_indexes": top_docs.indices[i]},
+     with_indices=True
+ )
+
+ # Drop a retrieved document id if it corresponds to the ground-truth document
+ queries = queries.map(lambda x: {"top_cosim_indexes": [i for i in x["top_cosim_indexes"] if i != x["doc_id"]]})
+ # Convert document ids to document texts based on the corpus
+ queries = queries.map(lambda x: {"negative": [corpus["documents"][i]["text"] for i in x["top_cosim_indexes"]]})
+ queries = queries.map(lambda x: {"positive": [corpus["documents"][x["doc_id"]]["text"]]})
+
+ # Format as the MTEB reranking format (query / positive / negative)
+ queries = queries.rename_column("Question", "query")
+ dataset = queries.remove_columns(['Article', 'embeddings', 'doc_id', 'top_cosim_values', 'top_cosim_indexes'])
+ # Rename the dataset split from "queries" to "test"
+ dataset["test"] = dataset.pop("queries")
+
+ # Create the HF dataset repo (skip if it already exists)
+ repo_id = "lyon-nlp/mteb-fr-reranking-syntec-s2p"
+ try:
+     create_repo(repo_id, repo_type="dataset")
+ except HfHubHTTPError:
+     print("HF repo already exists")
+
+ # Push the dataset to the Hub
+ dataset.push_to_hub(repo_id)
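
For reference, a minimal sketch (not part of the uploaded script) of how the pushed dataset can be loaded back to check that each example follows the query / positive / negative layout produced above; the repo id and the "test" split name come from the script, the rest is illustrative.

import datasets

# Load the reranking dataset built by create_data_reranking.py
reranking = datasets.load_dataset("lyon-nlp/mteb-fr-reranking-syntec-s2p", split="test")

example = reranking[0]
print(example["query"])           # the question text
print(len(example["positive"]))   # a single ground-truth document
print(len(example["negative"]))   # cosine-similarity negatives (9 or 10, depending on whether the ground truth was retrieved)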
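
As a usage sketch, a cross-encoder reranker can score each query against its positive and negative documents. The checkpoint below is only a placeholder assumption (an English MS MARCO cross-encoder); a French or multilingual reranker would be the realistic choice for this dataset.

import datasets
from sentence_transformers import CrossEncoder

reranking = datasets.load_dataset("lyon-nlp/mteb-fr-reranking-syntec-s2p", split="test")
reranker = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")  # placeholder checkpoint, not used by the script

example = reranking[0]
candidates = example["positive"] + example["negative"]
scores = reranker.predict([(example["query"], doc) for doc in candidates])

# A good reranker should rank the positive document (index 0) first
best = max(range(len(candidates)), key=lambda i: scores[i])
print("positive ranked first:", best == 0)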