|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Data Loader for Turku Hockey Data2Text corpus""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
import re |
|
|
|
import datasets |
|
|
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{kanerva2019newsgen, |
|
Title = {Template-free Data-to-Text Generation of Finnish Sports News}, |
|
Author = {Jenna Kanerva and Samuel R{\"o}nnqvist and Riina Kekki and Tapio Salakoski and Filip Ginter}, |
|
booktitle = {Proceedings of the 22nd Nordic Conference on Computational Linguistics (NoDaLiDa’19)}, |
|
year={2019} |
|
} |
|
""" |
|
|
|
|
|
|
|
# Human-readable corpus summary (shown on the dataset card).
_DESCRIPTION = """\

The Turku Hockey Data2Text corpus was developed as a benchmark for evaluating template-free, machine learning methods on Finnish news generation in the area of ice hockey reporting. This dataset is a collection of 3,454 ice hockey games, each including game statistics and a news article describing the game. Each game includes manual alignment of events (such as goals or penalties) and sentences describing the specific event in natural language extracted from the news article. The corpus includes 12,827 annotated events. The natural language passages are manually curated not to include any information not derivable from the input data or world knowledge.

"""

# URL of the corpus repository.
_HOMEPAGE = "https://github.com/TurkuNLP/Turku-hockey-data2text"

# Corpus license (non-commercial, share-alike).
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)"
|
|
|
|
|
|
|
|
|
# Per-split data file paths; passed to dl_manager.download_and_extract()
# in _split_generators below.
_URLs = {
    'train': 'train.json',
    'validation': 'validation.json',
    'test': 'test.json'
}
|
|
|
|
|
|
|
# For each event type, the ordered list of event fields that are serialized
# into the linearized input string (see event2string below).
relevant_keys = {
    "game result": ["event_type", "home_team", "guest_team", "score", "periods", "features"],
    "goal": ["event_type", "score", "features", "player", "assist", "team", "team_name", "time"],
    "penalty": ["event_type", "player", "team", "team_name", "time", "penalty_minutes"],
    "saves": ["event_type", "player", "team", "team_name", "saves"],
}
|
|
|
|
|
class TurkuHockeyData2Text(datasets.GeneratorBasedBuilder):
    """The Turku Hockey Data2Text is a manually curated corpus for Finnish news generation in the area of ice hockey reporting."""

    VERSION = datasets.Version("1.1.0")

    # A "text" field of the form "E12" is not prose but a reference to another
    # event's id: several events aligned to the same text passage.
    # Compiled once here instead of on every _is_multireference call.
    _MULTIREF_RE = re.compile(r"E[0-9]+")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="event-generation", version=VERSION, description="This loads the dataset in its event generation mode, where an output description is generated for each game event separately. The full game information is not available."),
        datasets.BuilderConfig(name="game-generation", version=VERSION, description="This loads the dataset in its game generation mode, where the full game information is provided, but the actual input–output pairs must be created by the user."),
    ]

    DEFAULT_CONFIG_NAME = "event-generation"

    def _info(self):
        """Build the DatasetInfo; the feature schema depends on the selected config."""
        if self.config.name == "event-generation":
            # Flat (input, target) pairs, one example per usable game event.
            features = datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "game_id": datasets.Value("string"),
                    "event_id": datasets.Value("string"),
                    "input": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                }
            )
        else:  # game-generation: one example per game with its full event sequence
            features = datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "id": datasets.Value("string"),
                    "news_article": datasets.Value("string"),
                    "events": datasets.features.Sequence(
                        {
                            "event_id": datasets.Value("string"),
                            "event_type": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "home_team": datasets.Value("string"),
                            "guest_team": datasets.Value("string"),
                            "score": datasets.Value("string"),
                            "periods": datasets.features.Sequence(datasets.Value("string")),
                            "features": datasets.features.Sequence(datasets.Value("string")),
                            "player": datasets.Value("string"),
                            "assist": datasets.features.Sequence(datasets.Value("string")),
                            "team": datasets.Value("string"),
                            "team_name": datasets.Value("string"),
                            "time": datasets.Value("string"),
                            "penalty_minutes": datasets.Value("string"),
                            "saves": datasets.Value("string"),
                            "multi_reference": datasets.Value("bool"),
                        }
                    ),
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/validation/test JSON files."""
        data_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"], "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["validation"], "split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"], "split": "test"}),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples.

        The integer key now equals the running index embedded in the example's
        "gem_id" (previously the counter was incremented before yielding, so
        keys and gem_ids were off by one).
        """
        with open(filepath, "rt", encoding="utf-8") as f:
            data = json.load(f)
        idx = 0
        for example in data:
            if self.config.name == "game-generation":
                example["gem_id"] = f"gem-turku_hockey_data2text-{split}-{idx}"
                yield idx, self._generate_features(example)
                idx += 1
            else:  # event-generation: one flat example per usable event
                game_data = self._generate_features(example)
                for e in self.create_event_data(game_data["events"]):
                    e["gem_id"] = f"gem-turku_hockey_data2text-{split}-{idx}"
                    e["game_id"] = game_data["id"]
                    yield idx, e
                    idx += 1

    def _generate_features(self, example):
        """Normalize the game's events so all events share one schema.

        Missing optional fields are filled with empty defaults ("" / []),
        a None "text" becomes "", and each event gets a "multi_reference"
        flag. Mutates and returns `example`.
        """
        normalized = []
        for i, e in enumerate(example["events"]):
            normalized.append({
                "event_id": e["event_id"],
                "event_type": e["event_type"],
                "text": e["text"] if e["text"] is not None else "",
                "home_team": e.get("home_team", ""),
                "guest_team": e.get("guest_team", ""),
                "score": e.get("score", ""),
                "periods": e.get("periods", []),
                "features": e.get("features", []),
                "player": e.get("player", ""),
                "assist": e.get("assist", []),
                "team": e.get("team", ""),
                "team_name": e.get("team_name", ""),
                "time": e.get("time", ""),
                "penalty_minutes": e.get("penalty_minutes", ""),
                "saves": e.get("saves", ""),
                # Flag computed against the raw (pre-normalization) events,
                # where an unaligned "text" is still None.
                "multi_reference": self._is_multireference(i, example["events"]),
            })
        example["events"] = normalized
        return example

    def _is_multireference(self, i, events):
        """Return True if this event is one of the multireference events
        (multiple events refer to the same text passage), otherwise False.

        An event is multi-reference either when its own "text" is a reference
        like "E3" to another event, or when any other event's "text" points at
        this event's id.
        """
        text = events[i]["text"]
        if text is None:
            return False
        if self._MULTIREF_RE.match(text):
            return True

        # Collect every event id that is referenced by some event's "text";
        # a set gives O(1) membership instead of the previous list scan.
        referenced_ids = {
            e["text"]
            for e in events
            if e["text"] is not None and self._MULTIREF_RE.match(e["text"])
        }
        return events[i]["event_id"] in referenced_ids

    def create_event_data(self, events):
        """Turn normalized events into flat event-generation examples.

        Events without an aligned text and multi-reference events are skipped
        (event2string returns (None, None, None) for those). "gem_id" and
        "game_id" are left empty and filled in by the caller.
        """
        simplified_events = []
        for i, _event in enumerate(events):
            e_idx, event_input, event_output = self.event2string(i, events)
            if not event_input:
                continue
            simplified_events.append({
                "gem_id": "",
                "game_id": "",
                "event_id": e_idx,
                "input": event_input,
                "target": event_output,
                "references": [event_output],
            })
        return simplified_events

    def event2string(self, i, events):
        """Featurize the i:th (normalized) event into a linearized string input.

        Returns (event_id, input_string, target_text), or (None, None, None)
        for events without text or with a multi-reference alignment.

        Example (note the "key = value" form actually produced by the code;
        the previous docstring showed "key: value", which was wrong):
            input: "event_type = saves [SEP] player = Jani Hurme [SEP] team = home [SEP] team_name = TPS [SEP] saves = 25"
            output: "TPS:n maalissa Jani Hurme ehti 25 kiekon tielle."
        """
        event = events[i]
        if event["text"] == "" or event["multi_reference"]:
            return None, None, None

        parts = []
        for key in relevant_keys[event["event_type"]]:
            value = event[key]
            if isinstance(value, (str, float)):
                parts.append(f"{key} = {value}")
            elif isinstance(value, list) and value:
                parts.append(f'{key} = {" , ".join(value)}')
        return event["event_id"], " [SEP] ".join(parts), event["text"]
|
|