# trec-cast-2019-multi-turn.py
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
from collections import defaultdict

import datasets
from trec_car import read_data
# Citation for the TREC CAsT 2019 overview paper (arXiv:2003.13624).
_CITATION = """\
@misc{dalton2020trec,
title={TREC CAsT 2019: The Conversational Assistance Track Overview},
author={Jeffrey Dalton and Chenyan Xiong and Jamie Callan},
year={2020},
eprint={2003.13624},
archivePrefix={arXiv},
primaryClass={cs.IR}
}
"""
# Description of the TREC CAsT 2019 track.
_DESCRIPTION = """\
The Conversational Assistance Track (CAsT) is a new track for TREC 2019 to facilitate Conversational Information
Seeking (CIS) research and to create a large-scale reusable test collection for conversational search systems.
The document corpus is 38,426,252 passages from the TREC Complex Answer Retrieval (CAR) and Microsoft MAchine
Reading COmprehension (MARCO) datasets.
"""
_HOMEPAGE = "http://www.treccast.ai"
_LICENSE = ""
# The HuggingFace datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see `_split_generators` below).
_URL = "https://huggingface.co/datasets/uva-irlab/trec-cast-2019-multi-turn/resolve/main/"
_URLs = {
'topics': _URL+"cast2019_test_annotated_without_context.tsv",
'topics_with_context': _URL+"cast2019_test_annotated_with_context.tsv",
'qrels': _URL+"2019qrels.txt",
'test_collection': {
'car': "http://trec-car.cs.unh.edu/datareleases/v2.0/paragraphCorpus.v2.0.tar.xz",
'msmarco': 'https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz',
},
}
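# Number of passages kept per source corpus for the 'test_collection_sample'
# configuration, so the sample contains up to 2 * SAMPLE_SIZE passages in total.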
SAMPLE_SIZE = 100000
class TrecCast2019MultiTurn(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.1")
    # This dataset defines five configurations, listed in BUILDER_CONFIGS below.
    # The configuration selected by the user is available as self.config.name.
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="qrels",
version=VERSION,
description=""),
datasets.BuilderConfig(name="topics",
version=VERSION,
description="The topics contain the queries, query IDs and their history."),
datasets.BuilderConfig(name="topics_with_context",
version=VERSION,
description="The topics contain the queries with relevant terms from the history, query IDs and their history."),
datasets.BuilderConfig(name="test_collection",
version=VERSION,
description="The test collection will provide the passages of TREC CAR and MSMARCO"),
datasets.BuilderConfig(name="test_collection_sample",
version=VERSION,
description="A small sample of 20000 of the test collection passages."),
]
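    # A minimal usage sketch (assuming this script is published on the Hub under
    # 'uva-irlab/trec-cast-2019-multi-turn', as _URL above suggests):
    #
    #   from datasets import load_dataset
    #   topics = load_dataset('uva-irlab/trec-cast-2019-multi-turn', 'topics')
    #   qrels = load_dataset('uva-irlab/trec-cast-2019-multi-turn', 'qrels')
    #
    # Every configuration exposes a single 'test' split (see _split_generators).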
    # It's not mandatory to have a default configuration. Just use one if it makes sense.
DEFAULT_CONFIG_NAME = "test_collection"
def _info(self):
        # Features and download size depend on the configuration selected in BUILDER_CONFIGS above.
download_size = None
if self.config.name == "topics":
features = datasets.Features({
"qid": datasets.Value("string"),
"history": datasets.features.Sequence(feature=datasets.Value('string')),
"query": datasets.Value("string"),
})
download_size = 6784
elif self.config.name == "topics_with_context":
features = datasets.Features({
"qid": datasets.Value("string"),
"history": datasets.features.Sequence(feature=datasets.Value('string')),
"query": datasets.Value("string"),
})
download_size = 8010
elif self.config.name == "qrels":
features = datasets.Features({
"qid": datasets.Value("string"),
"qrels": datasets.features.Sequence(feature=datasets.Features({
'docno': datasets.Value("string"),
'relevance': datasets.Value("string"),
})),
})
download_size = 1138032
        else:  # 'test_collection' and 'test_collection_sample'
features = datasets.Features({
"docno": datasets.Value("string"),
"text": datasets.Value("string"),
})
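            # The full CAR and MS MARCO archives are downloaded even for the sample configuration.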
download_size = 5085726092 + 1035009698
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
            features=features,  # Defined above because they differ between configurations
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
download_size=download_size
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
        # This method downloads/extracts the data and defines the splits, depending on the configuration.
        # The configuration selected by the user is available as self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It accepts any nested list/dict of URLs and returns the same structure with each URL replaced by a local path.
        # By default, archives are extracted and the path to the cached extraction folder is returned instead of the archive.
urlkey = 'test_collection' if self.config.name == 'test_collection_sample' else self.config.name
my_urls = _URLs[urlkey]
downloaded_files = dl_manager.download_and_extract(my_urls)
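        # For 'test_collection' (and its sample), downloaded_files is a dict with the
        # extraction paths under the keys 'car' and 'msmarco'; otherwise it is a single file path.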
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={ # These kwargs will be passed to _generate_examples
"file": downloaded_files,
"split": self.config.name
},
),
]
def _generate_examples(
self, file, split # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
):
""" Yields examples as (key, example) tuples. """
# This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reasons (tfds) and is not important in itself.
if split == 'qrels':
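            # Each qrels line follows the standard TREC format, space-separated:
            #   <qid> <iteration> <docno> <relevance>
            # Only the 1st, 3rd and 4th columns are used.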
            qrels = defaultdict(list)
            with open(file) as f:
                for row in csv.reader(f, delimiter=" "):
                    qid = row[0]
                    docno = row[2]
                    relevance = row[3]
                    qrels[qid].append({'docno': docno, 'relevance': relevance})
            for qid in qrels.keys():
                yield qid, {'qid': qid, 'qrels': qrels[qid]}
elif split == 'topics' or split == 'topics_with_context':
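            # Each topics row is "<qid>\t<query>", where the qid has the form
            # "<conversation_id>_<turn_number>". The history of a turn consists of
            # all preceding queries in the same conversation.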
            topics = defaultdict(list)
            with open(file) as f:
                for qid, query in csv.reader(f, delimiter="\t"):
                    conversation_id, _turn_number = qid.split('_')
                    topics[conversation_id].append(query)
            for conversation_id, queries in topics.items():
                for idx, query in enumerate(queries):
                    qid = f"{conversation_id}_{idx + 1}"
                    yield qid, {'query': query, 'history': queries[:idx], 'qid': qid}
elif split == 'test_collection' or split == 'test_collection_sample':
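            # CAR passages are read from the deduplicated cbor paragraph corpus via trec_car;
            # MS MARCO passages come from a tab-separated "<docid>\t<text>" file. The CAR_/MARCO_
            # docno prefixes added below match the docnos used in the qrels.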
car_file = file['car'] + "/paragraphCorpus/dedup.articles-paragraphs.cbor"
msmarco_file = file['msmarco']+"/collection.tsv"
is_sample = split == 'test_collection_sample'
i = 0
with open(car_file, 'rb') as f:
for para in read_data.iter_paragraphs(f):
docid = f"CAR_{para.para_id}"
yield docid, ({"docno": docid, "text": para.get_text()})
i += 1
if is_sample and i >= SAMPLE_SIZE:
break
i = 0
with open(msmarco_file) as f:
msmarco = csv.reader(f, delimiter="\t")
for line in msmarco:
docid, text = line
docid = f"MARCO_{docid}"
yield docid, ({"docno": docid, "text": text})
i += 1
if is_sample and i >= SAMPLE_SIZE:
break
else:
raise NotImplementedError(f"'{split}' is not yet implemented")