Datasets: GEM
Multilinguality: multilingual
Size Categories: 10K<n<100K
Language Creators: crowdsourced
Annotations Creators: expert-generated
Source Datasets: original
License: cc-by-nc-sa-4.0
xmediasum / xmediasum.py (commit 15296bd, "add xmediasum dataset", Krystalan)
import json
import datasets
_CITATION = """\
@inproceedings{wang-etal-2022-clidsum,
    title = "{C}lid{S}um: A Benchmark Dataset for Cross-Lingual Dialogue Summarization",
    author = "Wang, Jiaan and
      Meng, Fandong and
      Lu, Ziyao and
      Zheng, Duo and
      Li, Zhixu and
      Qu, Jianfeng and
      Zhou, Jie",
    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.emnlp-main.526",
    pages = "7716--7729",
    abstract = "We present ClidSum, a benchmark dataset towards building cross-lingual summarization systems on dialogue documents. It consists of 67k+ dialogue documents and 112k+ annotated summaries in different target languages. Based on the proposed ClidSum, we introduce two benchmark settings for supervised and semi-supervised scenarios, respectively. We then build various baseline systems in different paradigms (pipeline and end-to-end) and conduct extensive experiments on ClidSum to provide deeper analyses. Furthermore, we propose mDialBART which extends mBART via further pre-training, where the multiple objectives help the pre-trained model capture the structural characteristics as well as key content in dialogues and the transformation from source to the target language. Experimental results show the superiority of mDialBART, as an end-to-end model, outperforms strong pipeline models on ClidSum. Finally, we discuss specific challenges that current approaches faced with this task and give multiple promising directions for future research. We have released the dataset and code at https://github.com/krystalan/ClidSum.",
}
"""
_DESCRIPTION = """\
We present XMediaSum, a cross-lingual dialogue summarization dataset with 40K English (dialogues) -> Chinese (summaries) and 40K English (dialogues) -> German (summaries) samples. XMediaSum is created by manually translating the English summaries of MediaSum (an English monolingual dialogue summarization dataset) into both Chinese and German.
"""
_HOMEPAGE = "https://github.com/krystalan/ClidSum"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
_URLS = {
    "train": "https://huggingface.co/datasets/Krystalan/xmediasum/resolve/main/data/train.json",
    "dev": "https://huggingface.co/datasets/Krystalan/xmediasum/resolve/main/data/val.json",
    "test": "https://huggingface.co/datasets/Krystalan/xmediasum/resolve/main/data/test.json",
}
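# Note: each JSON file above is assumed (from _generate_examples below) to be a flat
# list of records already carrying the four fields declared in _info(), e.g.:
#   [
#     {
#       "dialogue": "...English dialogue...",
#       "summary": "...English summary...",
#       "summary_de": "...German translation of the summary...",
#       "summary_zh": "...Chinese translation of the summary..."
#     },
#     ...
#   ]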
class xmediasum(datasets.GeneratorBasedBuilder):
    """XMediaSum: English dialogues paired with English, German, and Chinese summaries."""

    # Referenced as self.VERSION in _info() below.
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="xmediasum",
            version=datasets.Version("1.0.0", ""),
        )
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # English dialogue and its English summary, plus the manually
                    # translated German and Chinese summaries.
                    "dialogue": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "summary_de": datasets.Value("string"),
                    "summary_zh": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["dev"],
                },
            ),
        ]
    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        # Each record is passed through as-is; it is expected to already match the
        # features declared in _info(): dialogue, summary, summary_de, summary_zh.
        for idx, item in enumerate(data):
            yield idx, item
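
For reference, a minimal loading sketch. It assumes the script is served from the Krystalan/xmediasum repository that the URLs above point to; split and field names follow the builder definitions above.

from datasets import load_dataset

# Recent versions of the datasets library may additionally require
# trust_remote_code=True to run script-based datasets such as this one.
ds = load_dataset("Krystalan/xmediasum")

print(ds)                        # DatasetDict with train / validation / test splits
sample = ds["train"][0]
print(sample["dialogue"][:200])  # English dialogue (truncated)
print(sample["summary"])         # English summary
print(sample["summary_zh"])      # Chinese translation of the summary
print(sample["summary_de"])      # German translation of the summary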