system HF staff committed on
Commit d4c7d8a (0 parents)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (13)
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/house/1.0.0/dummy_data.zip +3 -0
  4. dummy/house/1.0.0/dummy_data/hansard.36.r2001-1a.house.debates.testing.tar/hansard.36/Release-2001.1a/sentence-pairs/house/debates/development/testing/hansard.36.1.house.debates.001.e.gz +3 -0
  5. dummy/house/1.0.0/dummy_data/hansard.36.r2001-1a.house.debates.testing.tar/hansard.36/Release-2001.1a/sentence-pairs/house/debates/development/testing/hansard.36.1.house.debates.001.f.gz +3 -0
  6. dummy/house/1.0.0/dummy_data/hansard.36.r2001-1a.house.debates.training.tar/hansard.36/Release-2001.1a/sentence-pairs/house/debates/development/training/hansard.36.1.house.debates.002.e.gz +3 -0
  7. dummy/house/1.0.0/dummy_data/hansard.36.r2001-1a.house.debates.training.tar/hansard.36/Release-2001.1a/sentence-pairs/house/debates/development/training/hansard.36.1.house.debates.002.f.gz +3 -0
  8. dummy/senate/1.0.0/dummy_data.zip +3 -0
  9. dummy/senate/1.0.0/dummy_data/hansard.36.r2001-1a.senate.debates.testing.tar/hansard.36/Release-2001.1a/sentence-pairs/senate/debates/development/testing/hansard.36.1.house.debates.001.e.gz +3 -0
  10. dummy/senate/1.0.0/dummy_data/hansard.36.r2001-1a.senate.debates.testing.tar/hansard.36/Release-2001.1a/sentence-pairs/senate/debates/development/testing/hansard.36.1.house.debates.001.f.gz +3 -0
  11. dummy/senate/1.0.0/dummy_data/hansard.36.r2001-1a.senate.debates.training.tar/hansard.36/Release-2001.1a/sentence-pairs/senate/debates/development/training/hansard.36.1.house.debates.002.e.gz +3 -0
  12. dummy/senate/1.0.0/dummy_data/hansard.36.r2001-1a.senate.debates.training.tar/hansard.36/Release-2001.1a/sentence-pairs/senate/debates/development/training/hansard.36.1.house.debates.002.f.gz +3 -0
  13. hansards.py +157 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
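Every pattern above routes matching files through Git LFS (git stores a small pointer; the payload lives in LFS). As a rough illustration only, here is a hypothetical sketch of checking a path against a few of these patterns with Python's fnmatch; git's own wildmatch rules differ slightly, notably for "saved_model/**/*", so this is not an exact reimplementation:

# Illustrative only: approximates which .gitattributes patterns above mark a
# path for Git LFS. Real matching is done by git, whose wildmatch semantics
# differ slightly from fnmatch.
from fnmatch import fnmatch

LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.gz", "*.tar.*", "*.zip", "*tfevents*"]

def tracked_by_lfs(path):
    """Return True if the file name matches any pattern in LFS_PATTERNS."""
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(name, pat) for pat in LFS_PATTERNS)

assert tracked_by_lfs("dummy/house/1.0.0/dummy_data.zip")
assert not tracked_by_lfs("hansards.py")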
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"senate": {"description": "\nThis release contains 1.3 million pairs of aligned text chunks (sentences or smaller fragments)\nfrom the official records (Hansards) of the 36th Canadian Parliament.\n\nThe complete Hansards of the debates in the House and Senate of the 36th Canadian Parliament,\nas far as available, were aligned. The corpus was then split into 5 sets of sentence pairs:\ntraining (80% of the sentence pairs), two sets of sentence pairs for testing (5% each), and\ntwo sets of sentence pairs for final evaluation (5% each). The current release consists of the\ntraining and testing sets. The evaluation sets are reserved for future MT evaluation purposes\nand currently not available.\n\nCaveats\n1. This release contains only sentence pairs. Even though the order of the sentences is the same\nas in the original, there may be gaps resulting from many-to-one, many-to-many, or one-to-many\nalignments that were filtered out. Therefore, this release may not be suitable for\ndiscourse-related research. \n2. Neither the sentence splitting nor the alignments are perfect. In particular, watch out for\npairs that differ considerably in length. You may want to filter these out before you do\nany statistical training.\n\nThe alignment of the Hansards was performed as part of the ReWrite project under funding\nfrom the DARPA TIDES program.\n", "citation": "\n", "homepage": "https://www.isi.edu/natural-language/download/hansard/", "license": "", "features": {"fr": {"dtype": "string", "id": null, "_type": "Value"}, "en": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "hansards", "config_name": "senate", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 5711686, "num_examples": 25553, "dataset_name": "hansards"}, "train": {"name": "train", "num_bytes": 40324278, "num_examples": 182135, "dataset_name": "hansards"}}, "download_checksums": {"http://www.isi.edu/natural-language/download/hansard/hansard.36.r2001-1a.senate.debates.training.tar": {"num_bytes": 13363200, "checksum": "f6e0b790133142244c3dffc8b72580bdee93958411c54b80e60febd53a22db53"}, "http://www.isi.edu/natural-language/download/hansard/hansard.36.r2001-1a.senate.debates.testing.tar": {"num_bytes": 1884160, "checksum": "86e76d003ee42458cb357b9227b147f63695195c69e89f43dcd84d7f66c819d2"}}, "download_size": 15247360, "dataset_size": 46035964, "size_in_bytes": 61283324}, "house": {"description": "\nThis release contains 1.3 million pairs of aligned text chunks (sentences or smaller fragments)\nfrom the official records (Hansards) of the 36th Canadian Parliament.\n\nThe complete Hansards of the debates in the House and Senate of the 36th Canadian Parliament,\nas far as available, were aligned. The corpus was then split into 5 sets of sentence pairs:\ntraining (80% of the sentence pairs), two sets of sentence pairs for testing (5% each), and\ntwo sets of sentence pairs for final evaluation (5% each). The current release consists of the\ntraining and testing sets. The evaluation sets are reserved for future MT evaluation purposes\nand currently not available.\n\nCaveats\n1. This release contains only sentence pairs. Even though the order of the sentences is the same\nas in the original, there may be gaps resulting from many-to-one, many-to-many, or one-to-many\nalignments that were filtered out. Therefore, this release may not be suitable for\ndiscourse-related research. \n2. Neither the sentence splitting nor the alignments are perfect. In particular, watch out for\npairs that differ considerably in length. You may want to filter these out before you do\nany statistical training.\n\nThe alignment of the Hansards was performed as part of the ReWrite project under funding\nfrom the DARPA TIDES program.\n", "citation": "\n", "homepage": "https://www.isi.edu/natural-language/download/hansard/", "license": "", "features": {"fr": {"dtype": "string", "id": null, "_type": "Value"}, "en": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "hansards", "config_name": "house", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 22906629, "num_examples": 122290, "dataset_name": "hansards"}, "train": {"name": "train", "num_bytes": 191459584, "num_examples": 947969, "dataset_name": "hansards"}}, "download_checksums": {"http://www.isi.edu/natural-language/download/hansard/hansard.36.r2001-1a.house.debates.training.tar": {"num_bytes": 60569600, "checksum": "8ea77a34538c7a5c942a44964dad5a56cfbac5605c0517196b7aece353ee3992"}, "http://www.isi.edu/natural-language/download/hansard/hansard.36.r2001-1a.house.debates.testing.tar": {"num_bytes": 7014400, "checksum": "c33692dfac3c727a2fb0103487638ea7f2df16ff01b856e7b8ebff68dcc5904a"}}, "download_size": 67584000, "dataset_size": 214366213, "size_in_bytes": 281950213}}
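The metadata above declares two configs, "senate" and "house", each with train and test splits. A minimal sketch of loading them with the datasets library, assuming the dataset is published under the id "hansards" (the builder_name above); the counts in the comments come from the "splits" entries:

# A minimal sketch: loading the two configs recorded in dataset_infos.json.
from datasets import load_dataset

senate = load_dataset("hansards", "senate")  # splits: train (182135 examples), test (25553)
house = load_dataset("hansards", "house")    # splits: train (947969 examples), test (122290)
print(senate["train"][0])                    # {"fr": "...", "en": "..."}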
dummy/house/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbc65e8086a55d5564f8c58340095ce485fa0a77a67a94be30ea2e18922bcdee
+ size 7941
dummy/house/1.0.0/dummy_data/hansard.36.r2001-1a.house.debates.testing.tar/hansard.36/Release-2001.1a/sentence-pairs/house/debates/development/testing/hansard.36.1.house.debates.001.e.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e91ae75683715d190479a076e7e85370c2a841e9671c4d945b9e46753b4d3094
+ size 320
dummy/house/1.0.0/dummy_data/hansard.36.r2001-1a.house.debates.testing.tar/hansard.36/Release-2001.1a/sentence-pairs/house/debates/development/testing/hansard.36.1.house.debates.001.f.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d08307459135f82baf800821a093fe10faa298fd36bf45e45dbdb5ab17a2dd2
+ size 368
dummy/house/1.0.0/dummy_data/hansard.36.r2001-1a.house.debates.training.tar/hansard.36/Release-2001.1a/sentence-pairs/house/debates/development/training/hansard.36.1.house.debates.002.e.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c64e6c74e3956aceb9c625cfe4f866101fbf558d07dfb6141862ff2aeb7bfabe
+ size 129
dummy/house/1.0.0/dummy_data/hansard.36.r2001-1a.house.debates.training.tar/hansard.36/Release-2001.1a/sentence-pairs/house/debates/development/training/hansard.36.1.house.debates.002.f.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eeaf64b79bae82651cdfc115430dc54c5d3b4b4d708585a55ac73130f4e9cca1
+ size 136
dummy/senate/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13fbe9da79f4772f2442e3e94587284cca293ebb6d3fa8959284f5376c29c38c
+ size 8005
dummy/senate/1.0.0/dummy_data/hansard.36.r2001-1a.senate.debates.testing.tar/hansard.36/Release-2001.1a/sentence-pairs/senate/debates/development/testing/hansard.36.1.house.debates.001.e.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e91ae75683715d190479a076e7e85370c2a841e9671c4d945b9e46753b4d3094
+ size 320
dummy/senate/1.0.0/dummy_data/hansard.36.r2001-1a.senate.debates.testing.tar/hansard.36/Release-2001.1a/sentence-pairs/senate/debates/development/testing/hansard.36.1.house.debates.001.f.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d08307459135f82baf800821a093fe10faa298fd36bf45e45dbdb5ab17a2dd2
+ size 368
dummy/senate/1.0.0/dummy_data/hansard.36.r2001-1a.senate.debates.training.tar/hansard.36/Release-2001.1a/sentence-pairs/senate/debates/development/training/hansard.36.1.house.debates.002.e.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c64e6c74e3956aceb9c625cfe4f866101fbf558d07dfb6141862ff2aeb7bfabe
+ size 129
dummy/senate/1.0.0/dummy_data/hansard.36.r2001-1a.senate.debates.training.tar/hansard.36/Release-2001.1a/sentence-pairs/senate/debates/development/training/hansard.36.1.house.debates.002.f.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eeaf64b79bae82651cdfc115430dc54c5d3b4b4d708585a55ac73130f4e9cca1
+ size 136
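Each dummy file above is checked in as a Git LFS pointer: three "key value" lines giving the spec version, the sha256 oid, and the payload size. A minimal sketch of a parser for that pointer format; the pointer text below is copied from the senate dummy_data.zip entry above:

# A minimal sketch: parsing a Git LFS pointer file like the ones above,
# per https://git-lfs.github.com/spec/v1.
def parse_lfs_pointer(text):
    """Return a dict with 'version', 'oid', and 'size' (size as int)."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    fields["size"] = int(fields["size"])
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:13fbe9da79f4772f2442e3e94587284cca293ebb6d3fa8959284f5376c29c38c
size 8005
"""
info = parse_lfs_pointer(pointer)
assert info["size"] == 8005 and info["oid"].startswith("sha256:")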
hansards.py ADDED
@@ -0,0 +1,157 @@
+ """Aligned Hansards of the 36th Parliament of Canada: French/English sentence pairs."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import glob
+ import os
+
+ import datasets
+
+
+ # TODO(hansards): BibTeX citation
+ _CITATION = """
+ """
+
+ _DESCRIPTION = """
+ This release contains 1.3 million pairs of aligned text chunks (sentences or smaller fragments)
+ from the official records (Hansards) of the 36th Canadian Parliament.
+
+ The complete Hansards of the debates in the House and Senate of the 36th Canadian Parliament,
+ as far as available, were aligned. The corpus was then split into 5 sets of sentence pairs:
+ training (80% of the sentence pairs), two sets of sentence pairs for testing (5% each), and
+ two sets of sentence pairs for final evaluation (5% each). The current release consists of the
+ training and testing sets. The evaluation sets are reserved for future MT evaluation purposes
+ and currently not available.
+
+ Caveats
+ 1. This release contains only sentence pairs. Even though the order of the sentences is the same
+ as in the original, there may be gaps resulting from many-to-one, many-to-many, or one-to-many
+ alignments that were filtered out. Therefore, this release may not be suitable for
+ discourse-related research.
+ 2. Neither the sentence splitting nor the alignments are perfect. In particular, watch out for
+ pairs that differ considerably in length. You may want to filter these out before you do
+ any statistical training.
+
+ The alignment of the Hansards was performed as part of the ReWrite project under funding
+ from the DARPA TIDES program.
+ """
+
+ _URL = "https://www.isi.edu/natural-language/download/hansard/"
+ _DATA_URL = "http://www.isi.edu/natural-language/download/hansard/"
+ _HOUSE_DEBATES_TRAIN_SET_FILE = "hansard.36.r2001-1a.house.debates.training.tar"
+ _HOUSE_DEBATES_TEST_SET_FILE = "hansard.36.r2001-1a.house.debates.testing.tar"
+ _SENATE_DEBATES_TRAIN_SET_FILE = "hansard.36.r2001-1a.senate.debates.training.tar"
+ _SENATE_DEBATES_TEST_SET_FILE = "hansard.36.r2001-1a.senate.debates.testing.tar"
+
+
+ class HansardsConfig(datasets.BuilderConfig):
+     """BuilderConfig for Hansards."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for Hansards.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(HansardsConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+
+
+ class Hansards(datasets.GeneratorBasedBuilder):
+     """Aligned Hansards of the 36th Parliament of Canada."""
+
+     # Default version; each HansardsConfig below pins version 1.0.0.
+     VERSION = datasets.Version("0.1.0")
+     BUILDER_CONFIGS = [
+         HansardsConfig(
+             name="house",
+             description="""\
+ Alignment of debates in the House of the 36th Canadian Parliament: 1,070K sentence pairs.
+ """,
+         ),
+         HansardsConfig(
+             name="senate",
+             description="""\
+ Alignment of debates in the Senate of the 36th Canadian Parliament: 208K sentence pairs.
+ """,
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # Each example is one aligned French/English sentence pair.
+             features=datasets.Features(
+                 {
+                     "fr": datasets.Value("string"),
+                     "en": datasets.Value("string"),
+                 }
+             ),
+             # There is no canonical (input, target) ordering, so no supervised
+             # keys are declared for as_supervised=True in builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # dl_manager is a datasets.download.DownloadManager that can be used
+         # to download and extract URLs.
+         name = self.config.name
+         if name == "house":
+             urls_to_download = {
+                 "train": _DATA_URL + _HOUSE_DEBATES_TRAIN_SET_FILE,
+                 "test": _DATA_URL + _HOUSE_DEBATES_TEST_SET_FILE,
+             }
+         elif name == "senate":
+             urls_to_download = {
+                 "train": _DATA_URL + _SENATE_DEBATES_TRAIN_SET_FILE,
+                 "test": _DATA_URL + _SENATE_DEBATES_TEST_SET_FILE,
+             }
+         else:
+             raise ValueError("Wrong builder config name '{}', it has to be either 'house' or 'senate'.".format(name))
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+         if isinstance(downloaded_files, str):
+             downloaded_files = {k: downloaded_files for k in urls_to_download}
+         fr_files = {}
+         en_files = {}
+         for split_name in downloaded_files:
+             # Archives unpack to .../sentence-pairs/<house|senate>/debates/development/<training|testing>.
+             archive_dir = "hansard.36/Release-2001.1a/sentence-pairs/{}/debates/development/{}".format(
+                 name, split_name + "ing"
+             )
+             data_dir = os.path.join(downloaded_files[split_name], archive_dir)
+             split_compress_files = sorted(glob.glob(os.path.join(data_dir, "*.gz")))
+             split_compress_files += sorted(glob.glob(os.path.join(data_dir, "**/*.gz")))
+             # French files end in .f.gz and English files in .e.gz; sorting keeps the two lists aligned.
+             fr_split_compress_files = sorted(f for f in split_compress_files if f.endswith(".f.gz"))
+             en_split_compress_files = sorted(f for f in split_compress_files if f.endswith(".e.gz"))
+             fr_files[split_name] = dl_manager.extract(fr_split_compress_files)
+             en_files[split_name] = dl_manager.extract(en_split_compress_files)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"fr_files": fr_files["train"], "en_files": en_files["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"fr_files": fr_files["test"], "en_files": en_files["test"]},
+             ),
+         ]
+
+     def _generate_examples(self, fr_files, en_files):
+         """Yields (key, example) tuples of aligned French/English sentence pairs."""
+         for fr_file, en_file in zip(fr_files, en_files):
+             with open(fr_file, "rb") as fr, open(en_file, "rb") as en:
+                 for j, (fr_line, en_line) in enumerate(zip(fr, en)):
+                     line_id = "{}:{}".format(fr_file, j)
+                     # The Hansard text files are Latin-1 (ISO-8859-1) encoded.
+                     rec = {"fr": fr_line.decode("ISO-8859-1").strip(), "en": en_line.decode("ISO-8859-1").strip()}
+                     yield line_id, rec
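Caveat 2 in _DESCRIPTION suggests filtering out pairs that differ considerably in length before any statistical training. A minimal sketch of doing that with the script above, assuming it is saved locally as hansards.py; load_dataset and Dataset.filter are standard datasets-library calls, and the 2.0 ratio threshold is an arbitrary illustrative choice:

# A minimal sketch: drop sentence pairs whose fr/en lengths differ wildly,
# per caveat 2 of the dataset description.
from datasets import load_dataset

train = load_dataset("./hansards.py", "house", split="train")

def balanced(example, max_ratio=2.0):
    """Keep pairs whose fr/en character lengths are within max_ratio of each other."""
    lf, le = len(example["fr"]), len(example["en"])
    return min(lf, le) > 0 and max(lf, le) / min(lf, le) <= max_ratio

filtered = train.filter(balanced)
print(len(train), "->", len(filtered))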