philipphager commited on
Commit
85a9dc3
1 Parent(s): e9faeb0

Create baidu-ultr_uva-mlm-ctr.py

Browse files
Files changed (1) hide show
  1. baidu-ultr_uva-mlm-ctr.py +217 -0
baidu-ultr_uva-mlm-ctr.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+ from typing import List
3
+
4
+ import datasets
5
+ import pandas as pd
6
+
7
+ from datasets import Features, Value, Array2D, Sequence, SplitGenerator, Split
8
+
9
+
10
+ _CITATION = """\
11
+ @InProceedings{huggingface:dataset,
12
+ title = {philipphager/baidu-ultr_baidu-mlm-ctr},
13
+ author={Philipp Hager, Romain Deffayet},
14
+ year={2023}
15
+ }
16
+ """
17
+
18
+ _DESCRIPTION = """\
19
+ Query-document vectors and clicks for a subset of the [Baidu Unbiased Learning to Rank dataset](https://arxiv.org/abs/2207.03051).
20
+ This dataset uses a BERT cross-encoder with 12 layers trained on a Masked Language Modeling (MLM) and click-through-rate (CTR) prediction task to compute query-document vectors (768 dims).
21
+
22
+ The model is available under `model/`.
23
+ """
24
+
25
+ _HOMEPAGE = "https://huggingface.co/datasets/philipphager/baidu-ultr_baidu-mlm-ctr/"
26
+ _LICENSE = "cc-by-nc-4.0"
27
+ _VERSION = "0.1.0"
28
+
29
+
30
class Config(str, Enum):
    """Builder configuration names accepted by :class:`BaiduUltrBuilder`.

    Subclassing ``str`` lets members compare equal to their plain string
    values, so ``self.config.name == Config.CLICKS`` works even when the
    config name is a raw ``str``.
    """

    # Expert relevance annotations (test split only).
    ANNOTATIONS = "annotations"
    # Train/test user clicks.
    CLICKS = "clicks"
33
+
34
+
35
class BaiduUltrBuilder(datasets.GeneratorBasedBuilder):
    """Builds one example per query from a subset of the Baidu ULTR dataset.

    Two configurations are available (see :class:`Config`):

    * ``clicks`` — train/test user clicks, loaded from sharded
      ``part-{p}_split-{s}.feather`` files.
    * ``annotations`` — expert relevance annotations, loaded from
      ``parts/validation.feather`` (exposed as the TEST split).

    Per-document columns (clicks, positions, embeddings, ...) are aggregated
    into one sequence per query by ``_generate_examples``.
    """

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=Config.CLICKS,
            version=VERSION,
            description="Load train/val/test clicks from the Baidu ULTR dataset",
        ),
        datasets.BuilderConfig(
            name=Config.ANNOTATIONS,
            version=VERSION,
            description="Load expert annotations from the Baidu ULTR dataset",
        ),
    ]

    # Schema for the "clicks" config. Per-document columns are Sequences
    # because they are aggregated per query; "n" is the document count.
    CLICK_FEATURES = Features(
        {
            "query_id": Value("string"),
            "query_md5": Value("string"),
            "url_md5": Sequence(Value("string")),
            "text_md5": Sequence(Value("string")),
            "query_document_embedding": Array2D((None, 768), "float16"),
            "click": Sequence(Value("int32")),
            "n": Value("int32"),
            "position": Sequence(Value("int32")),
            "media_type": Sequence(Value("int32")),
            "displayed_time": Sequence(Value("float32")),
            "serp_height": Sequence(Value("int32")),
            "slipoff_count_after_click": Sequence(Value("int32")),
        }
    )

    # Schema for the "annotations" config.
    # FIX: "text_md5" is listed in `agg_columns` for the annotations split in
    # `_split_generators`, so every generated example holds a *list* of md5s
    # per query — it must be declared as a Sequence (matching CLICK_FEATURES),
    # not a scalar Value("string") as before.
    ANNOTATION_FEATURES = Features(
        {
            "query_id": Value("string"),
            "query_md5": Value("string"),
            "text_md5": Sequence(Value("string")),
            "query_document_embedding": Array2D((None, 768), "float16"),
            "label": Sequence(Value("int32")),
            "n": Value("int32"),
            "frequency_bucket": Value("int32"),
        }
    )

    DEFAULT_CONFIG_NAME = Config.CLICKS

    def _info(self):
        """Return DatasetInfo with the feature schema for the active config.

        :raises ValueError: if the config name is neither "clicks" nor
            "annotations".
        """
        if self.config.name == Config.CLICKS:
            features = self.CLICK_FEATURES
        elif self.config.name == Config.ANNOTATIONS:
            features = self.ANNOTATION_FEATURES
        else:
            raise ValueError(
                f"Config {self.config.name} must be in ['clicks', 'annotations']"
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-config feather files and declare the splits.

        Clicks: parts 1-3 become TRAIN, part 0 becomes TEST.
        Annotations: a single validation file exposed as TEST.

        :raises ValueError: for an unknown config name.
        """
        if self.config.name == Config.CLICKS:
            train_files = self.download_clicks(dl_manager, parts=[1, 2, 3])
            test_files = self.download_clicks(dl_manager, parts=[0])

            # Columns with a single value per query.
            query_columns = [
                "query_id",
                "query_md5",
            ]

            # Columns with one value per document, aggregated per query.
            agg_columns = [
                "query_md5",
                "url_md5",
                "text_md5",
                "position",
                "click",
                "query_document_embedding",
                "media_type",
                "displayed_time",
                "serp_height",
                "slipoff_count_after_click",
            ]

            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={
                        "files": train_files,
                        "query_columns": query_columns,
                        "agg_columns": agg_columns,
                    },
                ),
                SplitGenerator(
                    name=Split.TEST,
                    gen_kwargs={
                        "files": test_files,
                        "query_columns": query_columns,
                        "agg_columns": agg_columns,
                    },
                ),
            ]
        elif self.config.name == Config.ANNOTATIONS:
            test_files = dl_manager.download(["parts/validation.feather"])
            query_columns = [
                "query_id",
                "query_md5",
                "frequency_bucket",
            ]
            agg_columns = [
                "text_md5",
                "label",
                "query_document_embedding",
            ]

            return [
                SplitGenerator(
                    name=Split.TEST,
                    gen_kwargs={
                        "files": test_files,
                        "query_columns": query_columns,
                        "agg_columns": agg_columns,
                    },
                )
            ]
        else:
            raise ValueError("Config name must be in ['clicks', 'annotations']")

    def download_clicks(self, dl_manager, parts: List[int], splits_per_part: int = 10):
        """Download all feather shards for the given dataset parts.

        :param dl_manager: `datasets` download manager.
        :param parts: Part numbers to fetch (each part has several shards).
        :param splits_per_part: Number of shards ("splits") per part.
        :return: List of local paths to the downloaded files.
        """
        urls = [
            f"parts/part-{p}_split-{s}.feather"
            for p in parts
            for s in range(splits_per_part)
        ]

        return dl_manager.download(urls)

    def _generate_examples(
        self,
        files: List[str],
        query_columns: List[str],
        agg_columns: List[str],
    ):
        """
        Reads dataset partitions and aggregates document features per query.

        Rows are assumed to be grouped by query_id within each file: a new
        example starts whenever query_id changes between consecutive rows.

        :param files: List of .feather files to load from disk.
        :param query_columns: Columns with one value per query. E.g., query_id,
        frequency bucket, etc.
        :param agg_columns: Columns with one value per document that should be
        aggregated per query. E.g., click, position, query_document_embeddings, etc.
        :return: Generator of (key, example) pairs, one per query.
        """
        for file in files:
            df = pd.read_feather(file)
            current_query_id = None
            sample_key = None
            sample = None

            for i in range(len(df)):
                row = df.iloc[i]

                if current_query_id != row["query_id"]:
                    # Flush the previous query's example before starting a new one.
                    if current_query_id is not None:
                        yield sample_key, sample

                    current_query_id = row["query_id"]
                    sample_key = f"{file}-{current_query_id}"
                    sample = {"n": 0}

                    for column in query_columns:
                        sample[column] = row[column]
                    for column in agg_columns:
                        sample[column] = []

                for column in agg_columns:
                    sample[column].append(row[column])

                sample["n"] += 1

            # FIX: only emit the trailing example if the file had any rows;
            # previously an empty partition yielded a bogus (None, None) pair.
            if current_query_id is not None:
                yield sample_key, sample