albertvillanova committed
Commit: 1b111ec
Parent: 15a7fb3

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (327c8575669846eb7ed8972e841d893354b6946f)
- Add monolingual_raw data files (f4ac9d58b4ac3d59fa6725eb61261bb08c702763)
- Add parallel_raw data files (12d7bbfdde2f5b493c7a25f919386d64c2dd25ff)
- Add monolingual data files (d310843c2bfcaa100c110289a0e43a62b5de4c8c)
- Delete loading script (d1fb0b24d17678b71d234843374f5512150d4aa2)
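
After this commit the Hub serves ChrEn straight from Parquet shards, with no loading script to run. A minimal loading sketch with the `datasets` library (assuming the dataset's Hub repo id is `chr_en`; any recent `datasets` release with Parquet support should work):

```python
from datasets import load_dataset

# "parallel" is the default config after the conversion (see `default: true`
# in the README diff below), so naming it explicitly is optional.
ds = load_dataset("chr_en", "parallel")
print(ds)                               # train / dev / out_dev / test / out_test
print(ds["train"][0]["sentence_pair"])  # {'en': '...', 'chr': '...'}
```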

README.md CHANGED
@@ -28,7 +28,37 @@ task_ids:
 - language-modeling
 - masked-language-modeling
 paperswithcode_id: chren
+config_names:
+- monolingual
+- monolingual_raw
+- parallel
+- parallel_raw
 dataset_info:
+- config_name: monolingual
+  features:
+  - name: sentence
+    dtype: string
+  splits:
+  - name: chr
+    num_bytes: 882824
+    num_examples: 5210
+  - name: en5000
+    num_bytes: 615275
+    num_examples: 5000
+  - name: en10000
+    num_bytes: 1211605
+    num_examples: 10000
+  - name: en20000
+    num_bytes: 2432298
+    num_examples: 20000
+  - name: en50000
+    num_bytes: 6065580
+    num_examples: 49999
+  - name: en100000
+    num_bytes: 12130164
+    num_examples: 100000
+  download_size: 16967664
+  dataset_size: 23337746
 - config_name: monolingual_raw
   features:
   - name: text_sentence
@@ -45,10 +75,36 @@ dataset_info:
     dtype: string
   splits:
   - name: full
-    num_bytes: 1210828
+    num_bytes: 1210056
     num_examples: 5210
-  download_size: 28899321
-  dataset_size: 1210828
+  download_size: 410646
+  dataset_size: 1210056
+- config_name: parallel
+  features:
+  - name: sentence_pair
+    dtype:
+      translation:
+        languages:
+        - en
+        - chr
+  splits:
+  - name: train
+    num_bytes: 3089562
+    num_examples: 11639
+  - name: dev
+    num_bytes: 260401
+    num_examples: 1000
+  - name: out_dev
+    num_bytes: 78126
+    num_examples: 256
+  - name: test
+    num_bytes: 264595
+    num_examples: 1000
+  - name: out_test
+    num_bytes: 80959
+    num_examples: 256
+  download_size: 2143266
+  dataset_size: 3773643
 - config_name: parallel_raw
   features:
   - name: line_number
@@ -71,66 +127,46 @@ dataset_info:
     dtype: string
   splits:
   - name: full
-    num_bytes: 5012923
+    num_bytes: 5010734
     num_examples: 14151
-  download_size: 28899321
-  dataset_size: 5012923
+  download_size: 2018726
+  dataset_size: 5010734
+configs:
 - config_name: monolingual
-  features:
-  - name: sentence
-    dtype: string
-  splits:
-  - name: chr
-    num_bytes: 882848
-    num_examples: 5210
-  - name: en5000
-    num_bytes: 615295
-    num_examples: 5000
-  - name: en10000
-    num_bytes: 1211645
-    num_examples: 10000
-  - name: en20000
-    num_bytes: 2432378
-    num_examples: 20000
-  - name: en50000
-    num_bytes: 6065780
-    num_examples: 49999
-  - name: en100000
-    num_bytes: 12130564
-    num_examples: 100000
-  download_size: 28899321
-  dataset_size: 23338510
+  data_files:
+  - split: chr
+    path: monolingual/chr-*
+  - split: en5000
+    path: monolingual/en5000-*
+  - split: en10000
+    path: monolingual/en10000-*
+  - split: en20000
+    path: monolingual/en20000-*
+  - split: en50000
+    path: monolingual/en50000-*
+  - split: en100000
+    path: monolingual/en100000-*
+- config_name: monolingual_raw
+  data_files:
+  - split: full
+    path: monolingual_raw/full-*
 - config_name: parallel
-  features:
-  - name: sentence_pair
-    dtype:
-      translation:
-        languages:
-        - en
-        - chr
-  splits:
-  - name: train
-    num_bytes: 3089658
-    num_examples: 11639
-  - name: dev
-    num_bytes: 260409
-    num_examples: 1000
-  - name: out_dev
-    num_bytes: 78134
-    num_examples: 256
-  - name: test
-    num_bytes: 264603
-    num_examples: 1000
-  - name: out_test
-    num_bytes: 80967
-    num_examples: 256
-  download_size: 28899321
-  dataset_size: 3773771
-config_names:
-- monolingual
-- monolingual_raw
-- parallel
-- parallel_raw
+  data_files:
+  - split: train
+    path: parallel/train-*
+  - split: dev
+    path: parallel/dev-*
+  - split: out_dev
+    path: parallel/out_dev-*
+  - split: test
+    path: parallel/test-*
+  - split: out_test
+    path: parallel/out_test-*
+  default: true
+- config_name: parallel_raw
+  data_files:
+  - split: full
+    path: parallel_raw/full-*
 ---

 # Dataset Card for ChrEn
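
The `configs` block added above is what replaces the deleted loading script: each `data_files` entry maps a named split to a Parquet glob pattern, and `default: true` keeps `parallel` as the default config. A sketch of selecting a non-default config and split (same repo-id assumption as above):

```python
from datasets import load_dataset

# The "chr" split resolves through the monolingual/chr-* pattern above.
chr_mono = load_dataset("chr_en", "monolingual", split="chr")
print(len(chr_mono))            # 5210 examples, per dataset_info above
print(chr_mono[0]["sentence"])
```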
chr_en.py DELETED
@@ -1,202 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""ChrEn: Cherokee-English Machine Translation data"""
-
-
-import openpyxl  # noqa: requires this pandas optional dependency for reading xlsx files
-import pandas as pd
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{zhang2020chren,
-  title={ChrEn: Cherokee-English Machine Translation for Endangered Language Revitalization},
-  author={Zhang, Shiyue and Frey, Benjamin and Bansal, Mohit},
-  booktitle={EMNLP2020},
-  year={2020}
-}
-"""
-
-_DESCRIPTION = """\
-ChrEn is a Cherokee-English parallel dataset to facilitate machine translation research between Cherokee and English.
-ChrEn is extremely low-resource contains 14k sentence pairs in total, split in ways that facilitate both in-domain and out-of-domain evaluation.
-ChrEn also contains 5k Cherokee monolingual data to enable semi-supervised learning.
-"""
-
-_HOMEPAGE = "https://github.com/ZhangShiyue/ChrEn"
-
-_LICENSE = ""
-
-_URLs = {
-    "monolingual_raw": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/raw/monolingual_data.xlsx",
-    "parallel_raw": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/raw/parallel_data.xlsx",
-    "monolingual_chr": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/chr",
-    "monolingual_en5000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en5000",
-    "monolingual_en10000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en10000",
-    "monolingual_en20000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en20000",
-    "monolingual_en50000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en50000",
-    "monolingual_en100000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en100000",
-    "parallel_train.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/train.chr",
-    "parallel_train.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/train.en",
-    "parallel_dev.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/dev.chr",
-    "parallel_dev.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/dev.en",
-    "parallel_out_dev.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/out_dev.chr",
-    "parallel_out_dev.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/out_dev.en",
-    "parallel_test.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/test.chr",
-    "parallel_test.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/test.en",
-    "parallel_out_test.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/out_test.chr",
-    "parallel_out_test.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/out_test.en",
-}
-
-
-# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
-class ChrEn(datasets.GeneratorBasedBuilder):
-    """ChrEn: Cherokee-English Machine Translation data."""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="monolingual_raw", version=VERSION, description="Monolingual data with metadata"),
-        datasets.BuilderConfig(name="parallel_raw", version=VERSION, description="Parallel data with metadata"),
-        datasets.BuilderConfig(name="monolingual", version=VERSION, description="Monolingual data text only"),
-        datasets.BuilderConfig(
-            name="parallel", version=VERSION, description="Parallel data text pairs only with default split"
-        ),
-    ]
-
-    DEFAULT_CONFIG_NAME = (
-        "parallel"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-    )
-
-    def _info(self):
-        if (
-            self.config.name == "monolingual_raw"
-        ):  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "text_sentence": datasets.Value("string"),
-                    "text_title": datasets.Value("string"),
-                    "speaker": datasets.Value("string"),
-                    "date": datasets.Value("int32"),
-                    "type": datasets.Value("string"),
-                    "dialect": datasets.Value("string"),
-                }
-            )
-        elif (
-            self.config.name == "parallel_raw"
-        ):  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "line_number": datasets.Value("string"),  # doesn't always map to a number
-                    "sentence_pair": datasets.Translation(languages=["en", "chr"]),
-                    "text_title": datasets.Value("string"),
-                    "speaker": datasets.Value("string"),
-                    "date": datasets.Value("int32"),
-                    "type": datasets.Value("string"),
-                    "dialect": datasets.Value("string"),
-                }
-            )
-        elif (
-            self.config.name == "parallel"
-        ):  # This is an example to show how to have different features for "first_domain" and "second_domain"
-            features = datasets.Features(
-                {
-                    "sentence_pair": datasets.Translation(languages=["en", "chr"]),
-                }
-            )
-        elif (
-            self.config.name == "monolingual"
-        ):  # This is an example to show how to have different features for "first_domain" and "second_domain"
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                }
-            )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,  # Here we define them above because they are different between the two configurations
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download(_URLs)
-        if self.config.name in [
-            "monolingual_raw",
-            "parallel_raw",
-        ]:  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            return [
-                datasets.SplitGenerator(
-                    name="full",
-                    gen_kwargs={
-                        "filepaths": data_dir,
-                        "split": "full",
-                    },
-                )
-            ]
-        elif self.config.name == "monolingual":
-            return [
-                datasets.SplitGenerator(
-                    name=spl,
-                    gen_kwargs={
-                        "filepaths": data_dir,
-                        "split": spl,
-                    },
-                )
-                for spl in ["chr", "en5000", "en10000", "en20000", "en50000", "en100000"]
-            ]
-        else:
-            return [
-                datasets.SplitGenerator(
-                    name=spl,
-                    gen_kwargs={
-                        "filepaths": data_dir,
-                        "split": spl,
-                    },
-                )
-                for spl in ["train", "dev", "out_dev", "test", "out_test"]
-            ]
-
-    def _generate_examples(self, filepaths, split):
-        if self.config.name == "monolingual_raw":
-            keys = ["text_sentence", "text_title", "speaker", "date", "type", "dialect"]
-            with open(filepaths["monolingual_raw"], "rb") as f:
-                monolingual = pd.read_excel(f, engine="openpyxl")
-            for id_, row in enumerate(monolingual.itertuples()):
-                yield id_, dict(zip(keys, row[1:]))
-        elif self.config.name == "parallel_raw":
-            keys = ["line_number", "en_sent", "chr_sent", "text_title", "speaker", "date", "type", "dialect"]
-            with open(filepaths["parallel_raw"], "rb") as f:
-                parallel = pd.read_excel(f, engine="openpyxl")
-            for id_, row in enumerate(parallel.itertuples()):
-                res = dict(zip(keys, row[1:]))
-                res["sentence_pair"] = {"en": res["en_sent"], "chr": res["chr_sent"]}
-                res["line_number"] = str(res["line_number"])
-                del res["en_sent"]
-                del res["chr_sent"]
-                yield id_, res
-        elif self.config.name == "monolingual":
-            f = open(filepaths[f"monolingual_{split}"], encoding="utf-8")
-            for id_, line in enumerate(f):
-                yield id_, {"sentence": line.strip()}
-        elif self.config.name == "parallel":
-            fi = open(filepaths[f"parallel_{split}.en"], encoding="utf-8")
-            fo = open(filepaths[f"parallel_{split}.chr"], encoding="utf-8")
-            for id_, (line_en, line_chr) in enumerate(zip(fi, fo)):
-                yield id_, {"sentence_pair": {"en": line_en.strip(), "chr": line_chr.strip()}}
monolingual/chr-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f58043d307a5b16be45c0a6b0b7b0b24c63d74c5ef272bee5d15361ea38cdfd5
+size 405534

monolingual/en10000-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b25c2a29e309589eb0998f6a518aaa1437d520c658931c9e70ba96b81e9c04e2
+size 892978

monolingual/en100000-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05a54678e4be29663a46394aa11f3e1a4e8ff1e7965dc8676b199aa4652db36d
+size 8950197

monolingual/en20000-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1236f38c7a0f5d8d01a58f42fc2a0325ded5c3d7259c6db8fc83dd12828d2255
+size 1791769

monolingual/en5000-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4acb72c3795559a3328acd7656363645055c63dcc8734e05fdcc43059d6f79d
+size 453371

monolingual/en50000-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:064b11df8b7662e3e5f0179912398e2bfca0d35e83176a1555942638af6d85a0
+size 4473815

monolingual_raw/full-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb137e5a03741910ef435892680784eb22c9fe17e053cc6957d96ec0db1d33bb
+size 410646

parallel/dev-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bacb6a89ecf6cea31c7624af46042adbabdcfc412e412e8676f0e885a5a38eaf
+size 148800

parallel/out_dev-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af0e9d173a04818f268c12283afa94cc8815b325a74f936834fbc09e39a699c8
+size 46297

parallel/out_test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68ce7fff5cd9a1fac784e04310da98dd40007b132ea55e99aafc887f3149ee0d
+size 48497

parallel/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da599563b15d3edb91c698ba91d581609e758548aab6c16dc06c819f26ec0353
+size 150963

parallel/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2012c4422f466995cce384f962d95f7203ad6fd4ef1078e7862c4058593271de
+size 1748709

parallel_raw/full-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e4abc41a6b2243b23b098b21daf1ce603dbc96202f4e5807b83f2aa8477b151
+size 2018726
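
The blobs above are Git LFS pointers rather than the Parquet bytes themselves; after fetching a shard, its `oid` and `size` fields can be checked locally. A small verification sketch (local path assumed):

```python
import hashlib

# Compare a downloaded shard against the LFS pointer fields shown above.
path = "parallel_raw/full-00000-of-00001.parquet"
with open(path, "rb") as f:
    data = f.read()
assert len(data) == 2018726  # `size` from the pointer
expected = "4e4abc41a6b2243b23b098b21daf1ce603dbc96202f4e5807b83f2aa8477b151"
assert hashlib.sha256(data).hexdigest() == expected  # `oid` from the pointer
```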