from enum import Enum
from typing import List

import datasets
import pandas as pd
from datasets import Features, Value, Array2D, Sequence, SplitGenerator, Split
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {philipphager/baidu-ultr_baidu-mlm-ctr},
author={Philipp Hager, Romain Deffayet},
year={2023}
}
"""
_DESCRIPTION = """\
Query-document vectors and clicks for a subset of the Baidu Unbiased Learning to Rank
dataset: https://arxiv.org/abs/2207.03051
This dataset uses the 12-layer BERT cross-encoder released by Baidu in the official
starter kit to compute query-document vectors (768 dims):
https://github.com/ChuXiaokai/baidu_ultr_dataset/
The model checkpoint is also available under `model/`.
"""
_HOMEPAGE = "https://huggingface.co/datasets/philipphager/baidu-ultr_baidu-mlm-ctr/"
_LICENSE = "cc-by-nc-4.0"
_VERSION = "0.1.0"


class Config(str, Enum):
    ANNOTATIONS = "annotations"
    CLICKS = "clicks"
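
# The "clicks" config loads click logs from sharded feather partitions
# (parts 1-3 as the train split, part 0 as the test split; see
# _split_generators below), while the "annotations" config loads expert
# relevance annotations from a single validation partition.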


class BaiduUltrBuilder(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=Config.CLICKS,
            version=VERSION,
            description="Load train/val/test clicks from the Baidu ULTR dataset",
        ),
        datasets.BuilderConfig(
            name=Config.ANNOTATIONS,
            version=VERSION,
            description="Load expert annotations from the Baidu ULTR dataset",
        ),
    ]
    CLICK_FEATURES = Features(
        {
            "query_id": Value("string"),
            "query_md5": Value("string"),
            "url_md5": Sequence(Value("string")),
            "text_md5": Sequence(Value("string")),
            "query_document_embedding": Array2D((None, 768), "float16"),
            "click": Sequence(Value("int32")),
            "n": Value("int32"),
            "position": Sequence(Value("int32")),
            "media_type": Sequence(Value("int32")),
            "displayed_time": Sequence(Value("float32")),
            "serp_height": Sequence(Value("int32")),
            "slipoff_count_after_click": Sequence(Value("int32")),
        }
    )
    ANNOTATION_FEATURES = Features(
        {
            "query_id": Value("string"),
            "query_md5": Value("string"),
            "text_md5": Sequence(Value("string")),
            "query_document_embedding": Array2D((None, 768), "float16"),
            "label": Sequence(Value("int32")),
            "n": Value("int32"),
            "frequency_bucket": Value("int32"),
        }
    )
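
    # For illustration, a generated example under the "clicks" config holds one
    # value per query-level column and one list entry per document. The values
    # below are invented placeholders, not actual dataset content:
    #
    #   {
    #       "query_id": "1245",
    #       "query_md5": "4b6e...",
    #       "url_md5": ["8a1f...", "03bc..."],
    #       "click": [0, 1],
    #       "position": [1, 2],
    #       "query_document_embedding": <float16 array of shape (2, 768)>,
    #       "n": 2,
    #       ...
    #   }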
    DEFAULT_CONFIG_NAME = Config.CLICKS
    def _info(self):
        if self.config.name == Config.CLICKS:
            features = self.CLICK_FEATURES
        elif self.config.name == Config.ANNOTATIONS:
            features = self.ANNOTATION_FEATURES
        else:
            raise ValueError(
                f"Config {self.config.name} must be in ['clicks', 'annotations']"
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        if self.config.name == Config.CLICKS:
            train_files = self.download_clicks(dl_manager, parts=[1, 2, 3])
            test_files = self.download_clicks(dl_manager, parts=[0])

            query_columns = [
                "query_id",
                "query_md5",
            ]
            agg_columns = [
                "url_md5",
                "text_md5",
                "position",
                "click",
                "query_document_embedding",
                "media_type",
                "displayed_time",
                "serp_height",
                "slipoff_count_after_click",
            ]

            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={
                        "files": train_files,
                        "query_columns": query_columns,
                        "agg_columns": agg_columns,
                    },
                ),
                SplitGenerator(
                    name=Split.TEST,
                    gen_kwargs={
                        "files": test_files,
                        "query_columns": query_columns,
                        "agg_columns": agg_columns,
                    },
                ),
            ]
        elif self.config.name == Config.ANNOTATIONS:
            test_files = dl_manager.download(["parts/validation.feather"])

            query_columns = [
                "query_id",
                "query_md5",
                "frequency_bucket",
            ]
            agg_columns = [
                "text_md5",
                "label",
                "query_document_embedding",
            ]

            return [
                SplitGenerator(
                    name=Split.TEST,
                    gen_kwargs={
                        "files": test_files,
                        "query_columns": query_columns,
                        "agg_columns": agg_columns,
                    },
                )
            ]
        else:
            raise ValueError("Config name must be in ['clicks', 'annotations']")
    def download_clicks(self, dl_manager, parts: List[int], splits_per_part: int = 10):
        urls = [
            f"parts/part-{p}_split-{s}.feather"
            for p in parts
            for s in range(splits_per_part)
        ]

        return dl_manager.download(urls)
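
    # E.g., download_clicks(dl_manager, parts=[0]) above resolves to the ten
    # files "parts/part-0_split-0.feather" through "parts/part-0_split-9.feather".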
    def _generate_examples(
        self,
        files: List[str],
        query_columns: List[str],
        agg_columns: List[str],
    ):
        """
        Reads dataset partitions and aggregates document features per query.

        :param files: List of .feather files to load from disk.
        :param query_columns: Columns with one value per query, e.g., query_id,
            frequency_bucket, etc.
        :param agg_columns: Columns with one value per document that should be
            aggregated per query, e.g., click, position, query_document_embedding, etc.
        :return: Yields (key, example) tuples, one example per query.
        """
        for file in files:
            df = pd.read_feather(file)
            current_query_id = None
            sample_key = None
            sample = None

            for i in range(len(df)):
                row = df.iloc[i]

                # Rows are grouped by query; when the query_id changes, emit
                # the finished sample and start a new one.
                if current_query_id != row["query_id"]:
                    if current_query_id is not None:
                        yield sample_key, sample

                    current_query_id = row["query_id"]
                    sample_key = f"{file}-{current_query_id}"
                    sample = {"n": 0}

                    for column in query_columns:
                        sample[column] = row[column]
                    for column in agg_columns:
                        sample[column] = []

                for column in agg_columns:
                    sample[column].append(row[column])

                sample["n"] += 1

            # Emit the last query of the file, guarding against empty partitions.
            if sample is not None:
                yield sample_key, sample
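

if __name__ == "__main__":
    # Minimal usage sketch: load the "clicks" test split through the Hugging
    # Face Hub. This assumes network access and that this script is hosted in
    # the repository named below; it is an illustration, not part of the builder.
    from datasets import load_dataset

    clicks = load_dataset(
        "philipphager/baidu-ultr_baidu-mlm-ctr",
        name=Config.CLICKS,
        split=Split.TEST,
        trust_remote_code=True,
    )
    print(clicks[0]["query_id"], clicks[0]["n"])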