"""The MGB Challenge Dataset."""


from __future__ import absolute_import, division, print_function

import logging
from collections import deque

import datasets

|
_CITATION = """\
@inproceedings{bell2015mgb,
    title={The MGB challenge: Evaluating multi-genre broadcast media recognition},
    author={Bell, Peter and Gales, Mark JF and Hain, Thomas and Kilgour, Jonathan and Lanchantin, Pierre and Liu, Xunying and McParland, Andrew and Renals, Steve and Saz, Oscar and Wester, Mirjam and others},
    booktitle={2015 IEEE Workshop on Automatic Speech Recognition and Understanding (ASRU)},
    pages={687--693},
    year={2015},
    organization={IEEE}
}
"""
|
_DESCRIPTION = """\
The first edition of the Multi-Genre Broadcast (MGB-1) Challenge is an evaluation of speech recognition, speaker diarization, and lightly supervised alignment using TV recordings in English.

The speech data is broad and multi-genre, spanning the whole range of TV output, and represents a challenging task for speech technology.

In 2015, the challenge used data from the British Broadcasting Corporation (BBC).
"""
|
# Base URL of the preprocessed text files. The original value is not preserved
# in this file; point it at wherever the files below are actually hosted.
_URL = ""
_LM_FILE = "lm.txt"
_TRAINING_FILE = "train.txt"
_DEV_FILE = "dev.txt"
|
|
class MGB_1Config(datasets.BuilderConfig):
    """BuilderConfig for MGB-1."""

    def __init__(self, with_dots=False, **kwargs):
        """BuilderConfig for MGB-1.

        Args:
            with_dots: if True, keep `<dots>` as a separate label instead of
                mapping it to `<full_stop>`.
            **kwargs: keyword arguments forwarded to super.
        """
        self.with_dots = with_dots
        super(MGB_1Config, self).__init__(**kwargs)

|
|
class MGB_1(datasets.GeneratorBasedBuilder):
    """The MGB-1 Challenge Dataset."""

    BUILDER_CONFIG_CLASS = MGB_1Config
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "punctuation": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="http://www.mgb-challenge.org/MGB-1.html",
            citation=_CITATION,
        )
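
    # Illustrative record shape (hypothetical input line
    # "id1 hello <comma> world <full_stop>" read with start_index=1):
    #   {"id": "0", "words": ["hello", "world"],
    #    "punctuation": ["<comma>", "<full_stop>"]}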
|
|
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "lm": f"{_URL}{_LM_FILE}",
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split("lm"),
                gen_kwargs={"filepath": downloaded_files["lm"], "start_index": 0},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"], "start_index": 1},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"], "start_index": 1},
            ),
        ]
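
    # Besides the standard train/validation splits, this exposes an extra
    # named split "lm" (the language-model text), reachable as ds["lm"] once
    # the dataset is loaded; see the usage sketch at the end of the file.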
|
|
|
    def _generate_examples(self, filepath, start_index):
        logging.info("⏳ Generating examples from = %s", filepath)
        punct = [
            "<full_stop>",
            "<dots>",
            "<comma>",
            "<exclamation_mark>",
            "<question_mark>",
        ]
        with open(filepath, encoding="utf-8") as f:
            current_tokens = deque()
            current_labels = deque()
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    tokens = row.lower().split(" ")
                    tokens = tokens[start_index:]
                    # Skip lines that are empty after dropping the id column or
                    # that start with a punctuation token.
                    if not tokens or tokens[0] in punct:
                        continue
                    prev_tok = None
                    for i, t in enumerate(tokens):
                        # A punctuation token becomes the label of the word
                        # before it; runs of consecutive punctuation collapse
                        # to the first token of the run.
                        if t in punct and (i == 0 or prev_tok not in punct):
                            if not self.config.with_dots and t == "<dots>":
                                current_labels.append("<full_stop>")
                            else:
                                current_labels.append(t)
                        elif t not in punct:
                            current_tokens.append(t)
                            # A word not followed by punctuation gets <none>.
                            if i == len(tokens) - 1 or tokens[i + 1] not in punct:
                                current_labels.append("<none>")
                        prev_tok = t

                    if not current_tokens:
                        continue
                    assert len(current_tokens) == len(current_labels), "mismatch between len of tokens & labels"
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "words": list(current_tokens),
                            "punctuation": list(current_labels),
                        },
                    )
                    sentence_counter += 1
                    current_tokens = deque()
                    current_labels = deque()
                    yield sentence

            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "words": list(current_tokens),
                    "punctuation": list(current_labels),
                }
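
# A minimal usage sketch (assumes this file is saved locally as mgb_1.py and
# that _URL points at reachable copies of lm.txt/train.txt/dev.txt):
#
#   import datasets
#   ds = datasets.load_dataset("./mgb_1.py", with_dots=False)
#   print(ds["train"][0])   # {"id": "0", "words": [...], "punctuation": [...]}
#   print(ds["lm"][0])      # the extra language-model split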