"""TODO(hansards): Add a description here."""


import glob
import os

import datasets
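
# A minimal usage sketch (assuming this script is resolved as the "hansards"
# dataset by the `datasets` library; the exact loading path may vary by setup):
#
#   from datasets import load_dataset
#   ds = load_dataset("hansards", "house")  # or "senate"
#   print(ds["train"][0])  # -> {"fr": "...", "en": "..."}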


# TODO(hansards): BibTeX citation
_CITATION = """
"""

_DESCRIPTION = """
This release contains 1.3 million pairs of aligned text chunks (sentences or smaller fragments)
from the official records (Hansards) of the 36th Canadian Parliament.

The complete Hansards of the debates in the House and Senate of the 36th Canadian Parliament,
as far as available, were aligned. The corpus was then split into 5 sets of sentence pairs:
training (80% of the sentence pairs), two sets of sentence pairs for testing (5% each), and
two sets of sentence pairs for final evaluation (5% each). The current release consists of the
training and testing sets. The evaluation sets are reserved for future MT evaluation purposes
and are currently not available.

Caveats:
1. This release contains only sentence pairs. Even though the order of the sentences is the same
as in the original, there may be gaps resulting from many-to-one, many-to-many, or one-to-many
alignments that were filtered out. Therefore, this release may not be suitable for
discourse-related research.
2. Neither the sentence splitting nor the alignments are perfect. In particular, watch out for
pairs that differ considerably in length. You may want to filter these out before you do
any statistical training.

The alignment of the Hansards was performed as part of the ReWrite project under funding
from the DARPA TIDES program.
"""

_URL = "https://www.isi.edu/natural-language/download/hansard/"
_DATA_URL = "http://www.isi.edu/natural-language/download/hansard/"
_HOUSE_DEBATES_TRAIN_SET_FILE = "hansard.36.r2001-1a.house.debates.training.tar"
_HOUSE_DEBATES_TEST_SET_FILE = "hansard.36.r2001-1a.house.debates.testing.tar"
_SENATE_DEBATES_TRAIN_SET_FILE = "hansard.36.r2001-1a.senate.debates.training.tar"
_SENATE_DEBATES_TEST_SET_FILE = "hansard.36.r2001-1a.senate.debates.testing.tar"


class HansardsConfig(datasets.BuilderConfig):
    """BuilderConfig for Hansards."""

    def __init__(self, **kwargs):
        """BuilderConfig for Hansards.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class Hansards(datasets.GeneratorBasedBuilder):
    """TODO(hansards): Short description of my dataset."""

    # TODO(hansards): Set up version.
    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        HansardsConfig(
            name="house",
            description="""\
          Alignment of debates in the House of the 36th Canadian Parliament: 1,070K sentence pairs.
          """,
        ),
        HansardsConfig(
            name="senate",
            description="""\
          Alignment of debates in the Senate of the 36th Canadian Parliament: 208K sentence pairs.
          """,
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Each example is one aligned French-English sentence pair.
            features=datasets.Features(
                {
                    "fr": datasets.Value("string"),
                    "en": datasets.Value("string"),
                }
            ),
            # The corpus has no canonical (input, target) direction, so no
            # supervised keys are declared for as_supervised=True.
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # TODO(hansards): Downloads the data and defines the splits
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs
        name = self.config.name
        if name == "house":
            urls_to_download = {
                "train": _DATA_URL + _HOUSE_DEBATES_TRAIN_SET_FILE,
                "test": _DATA_URL + _HOUSE_DEBATES_TEST_SET_FILE,
            }
        elif name == "senate":
            urls_to_download = {
                "train": _DATA_URL + _SENATE_DEBATES_TRAIN_SET_FILE,
                "test": _DATA_URL + _SENATE_DEBATES_TEST_SET_FILE,
            }
        else:
            raise ValueError(f"Unknown builder config name {name!r}; expected 'house' or 'senate'.")
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        if isinstance(downloaded_files, str):
            # A single extracted path may be returned; reuse it for every split.
            downloaded_files = {k: downloaded_files for k in urls_to_download}
        fr_files = {}
        en_files = {}
        for split_name in downloaded_files:
            # Inside each archive, the sentence-pair files sit at most one
            # directory below this root.
            archive_dir = f"hansard.36/Release-2001.1a/sentence-pairs/{name}/debates/development/{split_name}ing"
            data_dir = os.path.join(downloaded_files[split_name], archive_dir)
            split_compress_files = sorted(glob.glob(os.path.join(data_dir, "*.gz")))
            split_compress_files += sorted(glob.glob(os.path.join(data_dir, "**/*.gz")))
            # French files end in .f.gz and English files in .e.gz; sorting both
            # lists keeps the two sides aligned file-for-file.
            fr_split_compress_files = sorted(f for f in split_compress_files if f.endswith(".f.gz"))
            en_split_compress_files = sorted(f for f in split_compress_files if f.endswith(".e.gz"))
            fr_files[split_name] = dl_manager.extract(fr_split_compress_files)
            en_files[split_name] = dl_manager.extract(en_split_compress_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # gen_kwargs are forwarded to _generate_examples.
                gen_kwargs={"fr_files": fr_files["train"], "en_files": en_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"fr_files": fr_files["test"], "en_files": en_files["test"]},
            ),
        ]

    def _generate_examples(self, fr_files, en_files):
        """Yields (key, example) tuples, one per aligned sentence pair."""
        for fr_file, en_file in zip(fr_files, en_files):
            # The Hansard files are Latin-1 (ISO-8859-1) encoded, one sentence
            # per line, with the French and English files aligned line-by-line.
            with open(fr_file, "rb") as fr, open(en_file, "rb") as en:
                for j, (fr_line, en_line) in enumerate(zip(fr, en)):
                    line_id = f"{fr_file}:{j}"
                    rec = {
                        "fr": fr_line.decode("ISO-8859-1").strip(),
                        "en": en_line.decode("ISO-8859-1").strip(),
                    }
                    yield line_id, rec
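
# A hedged sketch for spot-checking the generator on already-extracted files
# (hypothetical paths; the builder constructor's parameter name may vary
# across `datasets` versions):
#
#   builder = Hansards(config_name="house")
#   pairs = builder._generate_examples(fr_files=["debates.f"], en_files=["debates.e"])
#   print(next(iter(pairs)))  # -> ("debates.f:0", {"fr": "...", "en": "..."})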