|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Polish Summaries Corpus: the corpus of Polish news summaries""" |
|
|
|
|
|
import glob |
|
import xml.etree.ElementTree as ET |
|
|
|
import datasets |
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{ |
|
ogro:kop:14:lrec, |
|
author = "Ogrodniczuk, Maciej and Kopeć, Mateusz", |
|
pdf = "http://nlp.ipipan.waw.pl/Bib/ogro:kop:14:lrec.pdf", |
|
title = "The {P}olish {S}ummaries {C}orpus", |
|
pages = "3712--3715", |
|
crossref = "lrec:14" |
|
} |
|
@proceedings{ |
|
lrec:14, |
|
editor = "Calzolari, Nicoletta and Choukri, Khalid and Declerck, Thierry and Loftsson, Hrafn and Maegaard, Bente and Mariani, Joseph and Moreno, Asuncion and Odijk, Jan and Piperidis, Stelios", |
|
isbn = "978-2-9517408-8-4", |
|
title = "Proceedings of the Ninth International {C}onference on {L}anguage {R}esources and {E}valuation, {LREC}~2014", |
|
url = "http://www.lrec-conf.org/proceedings/lrec2014/index.html", |
|
booktitle = "Proceedings of the Ninth International {C}onference on {L}anguage {R}esources and {E}valuation, {LREC}~2014", |
|
address = "Reykjavík, Iceland", |
|
key = "LREC", |
|
year = "2014", |
|
organization = "European Language Resources Association (ELRA)" |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
Polish Summaries Corpus: the corpus of Polish news summaries. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "http://zil.ipipan.waw.pl/PolishSummariesCorpus" |
|
|
|
|
|
_LICENSE = "CC BY v.3" |
|
|
|
|
|
|
|
|
|
_URL = "http://zil.ipipan.waw.pl/PolishSummariesCorpus?action=AttachFile&do=get&target=PSC_1.0.zip" |
|
|
|
|
|
|
|
class Polsum(datasets.GeneratorBasedBuilder):
    """Polish Summaries Corpus: the corpus of Polish news summaries.

    Each example is one news article together with every summary produced
    for it (abstract or extract), including the character spans of
    extractive summaries.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the ``DatasetInfo`` describing the feature schema and metadata."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "date": datasets.Value("string"),
                "title": datasets.Value("string"),
                "section": datasets.Value("string"),
                "authors": datasets.Value("string"),
                "body": datasets.Value("string"),
                "summaries": datasets.features.Sequence(
                    {
                        "ratio": datasets.Value("int32"),
                        "type": datasets.Value("string"),
                        "author": datasets.Value("string"),
                        "body": datasets.Value("string"),
                        "spans": datasets.features.Sequence(
                            {
                                "start": datasets.Value("int32"),
                                "end": datasets.Value("int32"),
                                "span_text": datasets.Value("string"),
                            }
                        ),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the corpus archive.

        The corpus is distributed as a single collection, so everything is
        exposed as the TRAIN split.
        """
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    # The zip unpacks to two directory levels before the
                    # per-document XML files, hence the */*/ glob.
                    "filepaths": glob.glob(data_dir + "/*/*/*.xml"),
                },
            ),
        ]

    @staticmethod
    def _parse_summary(summary_tag):
        """Convert one ``<summary>`` element into a plain dict."""
        spans_tag = summary_tag.find("spans")
        spans = []
        # BUG FIX: the original used ``if spans_tag:``.  Testing an
        # ElementTree Element's truth value is deprecated (DeprecationWarning
        # on Python 3.12+) and is False for a childless element; the correct
        # check for "element present" is an explicit None comparison.
        if spans_tag is not None:
            for span_tag in spans_tag.iterfind("span"):
                spans.append(
                    {
                        "start": int(span_tag.get("start")),
                        "end": int(span_tag.get("end")),
                        "span_text": span_tag.text.strip(),
                    }
                )
        return {
            "ratio": int(summary_tag.get("ratio")),
            "type": summary_tag.get("type"),
            "author": summary_tag.get("author"),
            "body": summary_tag.find("body").text.strip(),
            "spans": spans,
        }

    def _generate_examples(self, filepaths):
        """Yield ``(key, example)`` pairs, one example per source document.

        Files are sorted so example ordering (and the integer keys) is
        deterministic across runs.
        """
        for i, xml_path in enumerate(sorted(filepaths)):
            root = ET.parse(xml_path).getroot()
            summaries_tag = root.find("summaries")
            yield i, {
                "id": root.get("id"),
                "date": root.find("date").text.strip(),
                "title": root.find("title").text.strip(),
                "section": root.find("section").text.strip(),
                "authors": root.find("authors").text.strip(),
                "body": root.find("body").text.strip(),
                "summaries": [
                    self._parse_summary(summary_tag)
                    for summary_tag in summaries_tag.iterfind("summary")
                ],
            }
|
|