"""Aligned Hansards of the 36th Parliament of Canada."""

import glob
import os

import datasets


_CITATION = """
"""

_DESCRIPTION = """
This release contains 1.3 million pairs of aligned text chunks (sentences or smaller fragments)
from the official records (Hansards) of the 36th Canadian Parliament.

The complete Hansards of the debates in the House and Senate of the 36th Canadian Parliament,
as far as available, were aligned. The corpus was then split into 5 sets of sentence pairs:
training (80% of the sentence pairs), two sets of sentence pairs for testing (5% each), and
two sets of sentence pairs for final evaluation (5% each). The current release consists of the
training and testing sets. The evaluation sets are reserved for future MT evaluation purposes
and are currently not available.

Caveats:
1. This release contains only sentence pairs. Even though the order of the sentences is the same
   as in the original, there may be gaps resulting from many-to-one, many-to-many, or one-to-many
   alignments that were filtered out. Therefore, this release may not be suitable for
   discourse-related research.
2. Neither the sentence splitting nor the alignments are perfect. In particular, watch out for
   pairs that differ considerably in length. You may want to filter these out before you do
   any statistical training.

The alignment of the Hansards was performed as part of the ReWrite project under funding
from the DARPA TIDES program.
"""

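# A minimal usage sketch (illustrative, not part of the release): assuming this script is
# loaded through the Hugging Face `datasets` library under the name "hansards", caveat 2
# above can be addressed by dropping pairs whose lengths diverge sharply. The 2:1
# character-length ratio below is an assumed threshold, chosen purely for illustration.
#
#   import datasets
#
#   dset = datasets.load_dataset("hansards", "house", split="train")
#   dset = dset.filter(lambda ex: 0.5 <= len(ex["fr"]) / max(len(ex["en"]), 1) <= 2.0)
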
_URL = "https://www.isi.edu/natural-language/download/hansard/"
_DATA_URL = "http://www.isi.edu/natural-language/download/hansard/"
_HOUSE_DEBATES_TRAIN_SET_FILE = "hansard.36.r2001-1a.house.debates.training.tar"
_HOUSE_DEBATES_TEST_SET_FILE = "hansard.36.r2001-1a.house.debates.testing.tar"
_SENATE_DEBATES_TRAIN_SET_FILE = "hansard.36.r2001-1a.senate.debates.training.tar"
_SENATE_DEBATES_TEST_SET_FILE = "hansard.36.r2001-1a.senate.debates.testing.tar"


class HansardsConfig(datasets.BuilderConfig):
    """BuilderConfig for Hansards."""

    def __init__(self, **kwargs):
        """BuilderConfig for Hansards.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class Hansards(datasets.GeneratorBasedBuilder):
    """Aligned French-English sentence pairs from the Hansards of the 36th Canadian Parliament."""

    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        HansardsConfig(
            name="house",
            description="""\
            Alignment of debates in the House of the 36th Canadian Parliament: 1,070K sentence pairs.
            """,
        ),
        HansardsConfig(
            name="senate",
            description="""\
            Alignment of debates in the Senate of the 36th Canadian Parliament: 208K sentence pairs.
            """,
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "fr": datasets.Value("string"),
                    "en": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        name = self.config.name
        if name == "house":
            urls_to_download = {
                "train": _DATA_URL + _HOUSE_DEBATES_TRAIN_SET_FILE,
                "test": _DATA_URL + _HOUSE_DEBATES_TEST_SET_FILE,
            }
        elif name == "senate":
            urls_to_download = {
                "train": _DATA_URL + _SENATE_DEBATES_TRAIN_SET_FILE,
                "test": _DATA_URL + _SENATE_DEBATES_TEST_SET_FILE,
            }
        else:
            raise ValueError(f"Unknown builder config name '{name}'; it has to be either 'house' or 'senate'.")
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        if isinstance(downloaded_files, str):
            # A single extraction path applies to every split.
            downloaded_files = {k: downloaded_files for k in urls_to_download}
        fr_files = {}
        en_files = {}
        for split_name in downloaded_files:
            # Inside each extracted archive, the split directories are named "training" and "testing".
            archive_dir = f"hansard.36/Release-2001.1a/sentence-pairs/{name}/debates/development/{split_name + 'ing'}"
            data_dir = os.path.join(downloaded_files[split_name], archive_dir)
            # Collect the gzipped alignment files at the top level and one directory level down.
            split_compress_files = sorted(glob.glob(os.path.join(data_dir, "*.gz")))
            split_compress_files += sorted(glob.glob(os.path.join(data_dir, "**/*.gz")))
            # French chunks end in ".f.gz", English chunks in ".e.gz".
            fr_split_compress_files = sorted(f for f in split_compress_files if f.endswith(".f.gz"))
            en_split_compress_files = sorted(f for f in split_compress_files if f.endswith(".e.gz"))
            fr_files[split_name] = dl_manager.extract(fr_split_compress_files)
            en_files[split_name] = dl_manager.extract(en_split_compress_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"fr_files": fr_files["train"], "en_files": en_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"fr_files": fr_files["test"], "en_files": en_files["test"]},
            ),
        ]

    def _generate_examples(self, fr_files, en_files):
        """Yields examples."""
        for fr_file, en_file in zip(fr_files, en_files):
            # Aligned files are read in lockstep: line j of the French file corresponds
            # to line j of the English file.
            with open(fr_file, "rb") as fr, open(en_file, "rb") as en:
                for j, (fr_line, en_line) in enumerate(zip(fr, en)):
                    line_id = f"{fr_file}:{j}"
                    # The release is encoded in ISO-8859-1 (Latin-1), not UTF-8.
                    rec = {"fr": fr_line.decode("ISO-8859-1").strip(), "en": en_line.decode("ISO-8859-1").strip()}
                    yield line_id, rec
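

# A minimal standalone sketch of how the raw release files pair up, independent of the
# builder above. File names here are hypothetical placeholders; in the release, French
# chunks end in ".f.gz" and English chunks in ".e.gz", both ISO-8859-1 encoded.
#
#   import gzip
#
#   with gzip.open("chunk.f.gz", "rb") as fr, gzip.open("chunk.e.gz", "rb") as en:
#       for fr_line, en_line in zip(fr, en):
#           fr_text = fr_line.decode("ISO-8859-1").strip()
#           en_text = en_line.decode("ISO-8859-1").strip()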