"""The MGB Challenge Dataset."""

from __future__ import absolute_import, division, print_function

import logging
from collections import deque

import datasets


_CITATION = """\
@inproceedings{bell2015mgb,
  title={The MGB challenge: Evaluating multi-genre broadcast media recognition},
  author={Bell, Peter and Gales, Mark JF and Hain, Thomas and Kilgour, Jonathan and Lanchantin, Pierre and Liu, Xunying and McParland, Andrew and Renals, Steve and Saz, Oscar and Wester, Mirjam and others},
  booktitle={2015 IEEE Workshop on Automatic Speech Recognition and Understanding (ASRU)},
  pages={687--693},
  year={2015},
  organization={IEEE}
}

"""

_DESCRIPTION = """\
The first edition of the Multi-Genre Broadcast (MGB-1) Challenge is an evaluation of speech recognition, speaker diarization, and lightly supervised alignment using TV recordings in English.

The speech data is broad and multi-genre, spanning the whole range of TV output, and represents a challenging task for speech technology.

In 2015, the challenge used data from the British Broadcasting Corporation (BBC). 
"""

# Base URL where the preprocessed text files are hosted. The MGB-1 data is
# distributed by the challenge organisers (registration required), so this
# placeholder must be pointed at your own copy of the files.
_URL = ""
_LM_FILE = "lm.txt"
_TRAINING_FILE = "train.txt"
_DEV_FILE = "dev.txt"

class MGB_1Config(datasets.BuilderConfig):
    """BuilderConfig for the MGB-1 dataset."""

    def __init__(self, with_dots=False, **kwargs):
        """BuilderConfig for MGB-1.

        Args:
          with_dots: if True, keep `<dots>` as its own punctuation label
            instead of mapping it to `<full_stop>`.
          **kwargs: keyword arguments forwarded to super.
        """
        self.with_dots = with_dots
        super(MGB_1Config, self).__init__(**kwargs)


class MGB_1(datasets.GeneratorBasedBuilder):
    """The WNUT 17 Emerging Entities Dataset."""

    BUILDER_CONFIG_CLASS = MGB_1Config

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "punctuation": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="http://www.mgb-challenge.org/MGB-1.html",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "lm": f"{_URL}{_LM_FILE}",
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
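            # "lm" is a custom split holding the language-model training text.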
            datasets.SplitGenerator(name=datasets.Split('lm'), gen_kwargs={"filepath": downloaded_files["lm"], "start_index": 0}),
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"], "start_index": 1}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"], "start_index": 1}),
        ]

    def _generate_examples(self, filepath, start_index):
        logging.info("⏳ Generating examples from %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            current_tokens = deque()
            current_labels = deque()
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    tokens = row.lower().split(" ")
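                    # start_index skips the leading field on train/dev lines
                    # (presumably an utterance id); the LM file has none.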
                    tokens = tokens[start_index:]
                    punct = [
                        '<full_stop>',
                        '<dots>',
                        '<comma>',
                        '<exclamation_mark>',
                        '<question_mark>'
                    ]
                    if not tokens or tokens[0] in punct:
                        # We cannot interpret empty lines or lines that start with punctuation.
                        continue
                    prev_tok = None
                    for i, t in enumerate(tokens):
                        if t in punct and (i == 0 or prev_tok not in punct):
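                            # A run of punctuation tokens collapses to its first
                            # tag, which labels the preceding word.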
                            if not self.config.with_dots and t == '<dots>':
                                current_labels.append('<full_stop>')
                            else:
                                current_labels.append(t)
                        elif t not in punct:
                            current_tokens.append(t)
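                            # A word is labeled '<none>' unless the next token is
                            # punctuation, which will supply its label instead.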
                            if i == len(tokens) - 1 or tokens[i+1] not in punct:
                                current_labels.append('<none>')
                        prev_tok = t
                # Each non-empty line yields one example.
                if not current_tokens:
                    # Nothing to emit: the line was blank or was skipped above.
                    continue
                assert len(current_tokens) == len(current_labels), "mismatch between number of tokens and labels"
                sentence = (
                    sentence_counter,
                    {
                        "id": str(sentence_counter),
                        "words": current_tokens,
                        "punctuation": current_labels,
                    },
                )
                sentence_counter += 1
                current_tokens = deque()
                current_labels = deque()
                yield sentence
            # Don't forget last sentence in dataset 🧐
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "words": current_tokens,
                    "punctuation": current_labels,
                }
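

if __name__ == "__main__":
    # Minimal usage sketch, assuming _URL has been pointed at a copy of the
    # data files: load this script with the `datasets` library and print the
    # first training example.
    from datasets import load_dataset

    dataset = load_dataset(__file__, with_dots=False)
    print(dataset["train"][0])  # {'id': '0', 'words': [...], 'punctuation': [...]}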