parquet-converter committed
Commit b50a268 (1 parent: 0d76635)

Update parquet files

.gitattributes DELETED
@@ -1,27 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
README.md DELETED
@@ -1,263 +0,0 @@
- ---
- annotations_creators:
- - expert-generated
- - found
- - no-annotation
- language_creators:
- - found
- language:
- - chr
- - en
- license:
- - other
- multilinguality:
- - monolingual
- - multilingual
- - translation
- size_categories:
- - 100K<n<1M
- - 10K<n<100K
- - 1K<n<10K
- source_datasets:
- - original
- task_categories:
- - fill-mask
- - text-generation
- - translation
- task_ids:
- - language-modeling
- - masked-language-modeling
- paperswithcode_id: chren
- configs:
- - monolingual
- - monolingual_raw
- - parallel
- - parallel_raw
- dataset_info:
- - config_name: monolingual_raw
-   features:
-   - name: text_sentence
-     dtype: string
-   - name: text_title
-     dtype: string
-   - name: speaker
-     dtype: string
-   - name: date
-     dtype: int32
-   - name: type
-     dtype: string
-   - name: dialect
-     dtype: string
-   splits:
-   - name: full
-     num_bytes: 1210828
-     num_examples: 5210
-   download_size: 28899321
-   dataset_size: 1210828
- - config_name: parallel_raw
-   features:
-   - name: line_number
-     dtype: string
-   - name: sentence_pair
-     dtype:
-       translation:
-         languages:
-         - en
-         - chr
-   - name: text_title
-     dtype: string
-   - name: speaker
-     dtype: string
-   - name: date
-     dtype: int32
-   - name: type
-     dtype: string
-   - name: dialect
-     dtype: string
-   splits:
-   - name: full
-     num_bytes: 5012923
-     num_examples: 14151
-   download_size: 28899321
-   dataset_size: 5012923
- - config_name: monolingual
-   features:
-   - name: sentence
-     dtype: string
-   splits:
-   - name: chr
-     num_bytes: 882848
-     num_examples: 5210
-   - name: en5000
-     num_bytes: 615295
-     num_examples: 5000
-   - name: en10000
-     num_bytes: 1211645
-     num_examples: 10000
-   - name: en20000
-     num_bytes: 2432378
-     num_examples: 20000
-   - name: en50000
-     num_bytes: 6065780
-     num_examples: 49999
-   - name: en100000
-     num_bytes: 12130564
-     num_examples: 100000
-   download_size: 28899321
-   dataset_size: 23338510
- - config_name: parallel
-   features:
-   - name: sentence_pair
-     dtype:
-       translation:
-         languages:
-         - en
-         - chr
-   splits:
-   - name: train
-     num_bytes: 3089658
-     num_examples: 11639
-   - name: dev
-     num_bytes: 260409
-     num_examples: 1000
-   - name: out_dev
-     num_bytes: 78134
-     num_examples: 256
-   - name: test
-     num_bytes: 264603
-     num_examples: 1000
-   - name: out_test
-     num_bytes: 80967
-     num_examples: 256
-   download_size: 28899321
-   dataset_size: 3773771
- ---
-
- # Dataset Card for ChrEn
-
- ## Table of Contents
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
-   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-   - [Languages](#languages)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Curation Rationale](#curation-rationale)
-   - [Source Data](#source-data)
-   - [Annotations](#annotations)
-   - [Personal and Sensitive Information](#personal-and-sensitive-information)
- - [Considerations for Using the Data](#considerations-for-using-the-data)
-   - [Social Impact of Dataset](#social-impact-of-dataset)
-   - [Discussion of Biases](#discussion-of-biases)
-   - [Other Known Limitations](#other-known-limitations)
- - [Additional Information](#additional-information)
-   - [Dataset Curators](#dataset-curators)
-   - [Licensing Information](#licensing-information)
-   - [Citation Information](#citation-information)
-   - [Contributions](#contributions)
-
- ## Dataset Description
-
- - **Repository:** [GitHub repository for ChrEn](https://github.com/ZhangShiyue/ChrEn)
- - **Paper:** [ChrEn: Cherokee-English Machine Translation for Endangered Language Revitalization](https://arxiv.org/abs/2010.04791)
- - **Point of Contact:** [benfrey@email.unc.edu](mailto:benfrey@email.unc.edu)
-
- ### Dataset Summary
-
- ChrEn is a Cherokee-English parallel dataset to facilitate machine translation research between Cherokee and English.
- ChrEn is extremely low-resource, containing 14k sentence pairs in total, split in ways that facilitate both in-domain and out-of-domain evaluation.
- ChrEn also contains 5k Cherokee monolingual sentences to enable semi-supervised learning.
-
- ### Supported Tasks and Leaderboards
-
- The dataset is intended to be used for `machine-translation` between English (`en`) and Cherokee (`chr`).
-
- ### Languages
-
- The dataset contains English (`en`) and Cherokee (`chr`) text. The data encompasses both existing dialects of Cherokee: the Overhill dialect, mostly spoken in Oklahoma (OK), and the Middle dialect, mostly used in North Carolina (NC).
-
- ## Dataset Structure
-
- ### Data Instances
-
- [More Information Needed]
-
- ### Data Fields
-
- [More Information Needed]
-
- ### Data Splits
-
- [More Information Needed]
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- [More Information Needed]
-
- ### Source Data
-
- #### Initial Data Collection and Normalization
-
- Many of the source texts were translations of English materials, which means that the Cherokee structures may not be 100% natural in terms of what a speaker might spontaneously produce. Each text was translated by people who speak Cherokee as their first language, so there is a high probability of grammaticality. These data were originally available as PDFs; Optical Character Recognition (OCR) via the Tesseract OCR engine was applied to extract the Cherokee and English text.
-
- #### Who are the source language producers?
-
- [More Information Needed]
-
- ### Annotations
-
- #### Annotation process
-
- [More Information Needed]
-
- #### Who are the annotators?
-
- The sentences were manually aligned by Dr. Benjamin Frey, a proficient second-language speaker of Cherokee, who also fixed the errors introduced by OCR. This process is time-consuming and took several months.
-
- ### Personal and Sensitive Information
-
- [More Information Needed]
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- [More Information Needed]
-
- ### Discussion of Biases
-
- [More Information Needed]
-
- ### Other Known Limitations
-
- [More Information Needed]
-
- ## Additional Information
-
- ### Dataset Curators
-
- The dataset was gathered and annotated by Shiyue Zhang, Benjamin Frey, and Mohit Bansal at UNC Chapel Hill.
-
- ### Licensing Information
-
- The copyright of the data belongs to the original book/article authors or translators (hence, the data is used for research purposes only; please contact Dr. Benjamin Frey with other copyright questions).
-
- ### Citation Information
-
- ```
- @inproceedings{zhang2020chren,
-   title={ChrEn: Cherokee-English Machine Translation for Endangered Language Revitalization},
-   author={Zhang, Shiyue and Frey, Benjamin and Bansal, Mohit},
-   booktitle={EMNLP2020},
-   year={2020}
- }
- ```
-
- ### Contributions
-
- Thanks to [@yjernite](https://github.com/yjernite), [@lhoestq](https://github.com/lhoestq) for adding this dataset.
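
For reference, the four configurations declared in the card's YAML map directly onto `load_dataset` calls. A minimal sketch, assuming the canonical `chr_en` dataset id on the Hugging Face Hub:

```python
from datasets import load_dataset

# Parallel text pairs; splits: train, dev, out_dev, test, out_test.
parallel = load_dataset("chr_en", "parallel")
pair = parallel["train"][0]["sentence_pair"]
print(pair["chr"], "->", pair["en"])

# Monolingual text; the Cherokee split is "chr", and English subsets are
# available as en5000, en10000, en20000, en50000, and en100000.
mono_chr = load_dataset("chr_en", "monolingual", split="chr")
print(mono_chr[0]["sentence"])
```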
 
chr_en.py DELETED
@@ -1,202 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ChrEn: Cherokee-English Machine Translation data"""
-
-
- import openpyxl  # noqa: pandas requires this optional dependency for reading xlsx files
- import pandas as pd
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{zhang2020chren,
-   title={ChrEn: Cherokee-English Machine Translation for Endangered Language Revitalization},
-   author={Zhang, Shiyue and Frey, Benjamin and Bansal, Mohit},
-   booktitle={EMNLP2020},
-   year={2020}
- }
- """
-
- _DESCRIPTION = """\
- ChrEn is a Cherokee-English parallel dataset to facilitate machine translation research between Cherokee and English.
- ChrEn is extremely low-resource, containing 14k sentence pairs in total, split in ways that facilitate both in-domain and out-of-domain evaluation.
- ChrEn also contains 5k Cherokee monolingual sentences to enable semi-supervised learning.
- """
-
- _HOMEPAGE = "https://github.com/ZhangShiyue/ChrEn"
-
- _LICENSE = ""
-
- _URLs = {
-     "monolingual_raw": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/raw/monolingual_data.xlsx",
-     "parallel_raw": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/raw/parallel_data.xlsx",
-     "monolingual_chr": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/chr",
-     "monolingual_en5000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en5000",
-     "monolingual_en10000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en10000",
-     "monolingual_en20000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en20000",
-     "monolingual_en50000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en50000",
-     "monolingual_en100000": "https://raw.githubusercontent.com/ZhangShiyue/ChrEn/main/data/monolingual/en100000",
-     "parallel_train.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/train.chr",
-     "parallel_train.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/train.en",
-     "parallel_dev.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/dev.chr",
-     "parallel_dev.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/dev.en",
-     "parallel_out_dev.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/out_dev.chr",
-     "parallel_out_dev.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/out_dev.en",
-     "parallel_test.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/test.chr",
-     "parallel_test.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/test.en",
-     "parallel_out_test.chr": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/out_test.chr",
-     "parallel_out_test.en": "https://github.com/ZhangShiyue/ChrEn/raw/main/data/parallel/out_test.en",
- }
-
-
- class ChrEn(datasets.GeneratorBasedBuilder):
-     """ChrEn: Cherokee-English Machine Translation data."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="monolingual_raw", version=VERSION, description="Monolingual data with metadata"),
-         datasets.BuilderConfig(name="parallel_raw", version=VERSION, description="Parallel data with metadata"),
-         datasets.BuilderConfig(name="monolingual", version=VERSION, description="Monolingual data, text only"),
-         datasets.BuilderConfig(
-             name="parallel", version=VERSION, description="Parallel data, text pairs only, with default split"
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "parallel"
-
-     def _info(self):
-         # Each configuration defined in BUILDER_CONFIGS above exposes its own feature schema.
-         if self.config.name == "monolingual_raw":
-             features = datasets.Features(
-                 {
-                     "text_sentence": datasets.Value("string"),
-                     "text_title": datasets.Value("string"),
-                     "speaker": datasets.Value("string"),
-                     "date": datasets.Value("int32"),
-                     "type": datasets.Value("string"),
-                     "dialect": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name == "parallel_raw":
-             features = datasets.Features(
-                 {
-                     "line_number": datasets.Value("string"),  # doesn't always map to a number
-                     "sentence_pair": datasets.Translation(languages=["en", "chr"]),
-                     "text_title": datasets.Value("string"),
-                     "speaker": datasets.Value("string"),
-                     "date": datasets.Value("int32"),
-                     "type": datasets.Value("string"),
-                     "dialect": datasets.Value("string"),
-                 }
-             )
-         elif self.config.name == "parallel":
-             features = datasets.Features(
-                 {
-                     "sentence_pair": datasets.Translation(languages=["en", "chr"]),
-                 }
-             )
-         elif self.config.name == "monolingual":
-             features = datasets.Features(
-                 {
-                     "sentence": datasets.Value("string"),
-                 }
-             )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,  # defined above because they differ between configurations
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         data_dir = dl_manager.download(_URLs)
-         if self.config.name in ["monolingual_raw", "parallel_raw"]:
-             return [
-                 datasets.SplitGenerator(
-                     name="full",
-                     gen_kwargs={"filepaths": data_dir, "split": "full"},
-                 )
-             ]
-         elif self.config.name == "monolingual":
-             return [
-                 datasets.SplitGenerator(
-                     name=spl,
-                     gen_kwargs={"filepaths": data_dir, "split": spl},
-                 )
-                 for spl in ["chr", "en5000", "en10000", "en20000", "en50000", "en100000"]
-             ]
-         else:
-             return [
-                 datasets.SplitGenerator(
-                     name=spl,
-                     gen_kwargs={"filepaths": data_dir, "split": spl},
-                 )
-                 for spl in ["train", "dev", "out_dev", "test", "out_test"]
-             ]
-
-     def _generate_examples(self, filepaths, split):
-         if self.config.name == "monolingual_raw":
-             keys = ["text_sentence", "text_title", "speaker", "date", "type", "dialect"]
-             with open(filepaths["monolingual_raw"], "rb") as f:
-                 monolingual = pd.read_excel(f, engine="openpyxl")
-             for id_, row in enumerate(monolingual.itertuples()):
-                 yield id_, dict(zip(keys, row[1:]))
-         elif self.config.name == "parallel_raw":
-             keys = ["line_number", "en_sent", "chr_sent", "text_title", "speaker", "date", "type", "dialect"]
-             with open(filepaths["parallel_raw"], "rb") as f:
-                 parallel = pd.read_excel(f, engine="openpyxl")
-             for id_, row in enumerate(parallel.itertuples()):
-                 res = dict(zip(keys, row[1:]))
-                 res["sentence_pair"] = {"en": res["en_sent"], "chr": res["chr_sent"]}
-                 res["line_number"] = str(res["line_number"])
-                 del res["en_sent"]
-                 del res["chr_sent"]
-                 yield id_, res
-         elif self.config.name == "monolingual":
-             # Use a context manager so the file handle is closed after iteration.
-             with open(filepaths[f"monolingual_{split}"], encoding="utf-8") as f:
-                 for id_, line in enumerate(f):
-                     yield id_, {"sentence": line.strip()}
-         elif self.config.name == "parallel":
-             # The .en and .chr files are line-aligned; zip() pairs them up.
-             with open(filepaths[f"parallel_{split}.en"], encoding="utf-8") as f_en, open(
-                 filepaths[f"parallel_{split}.chr"], encoding="utf-8"
-             ) as f_chr:
-                 for id_, (line_en, line_chr) in enumerate(zip(f_en, f_chr)):
-                     yield id_, {"sentence_pair": {"en": line_en.strip(), "chr": line_chr.strip()}}
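
The `parallel` branch of `_generate_examples` assumes the `.en` and `.chr` files are line-aligned, so sentence *i* in one file is the translation of sentence *i* in the other. A standalone sketch of that pairing step (the file paths are hypothetical; the script itself downloads them from the ChrEn GitHub repository):

```python
from typing import Dict, Iterator, Tuple


def read_parallel(en_path: str, chr_path: str) -> Iterator[Tuple[int, Dict]]:
    """Yield (id, example) pairs from two line-aligned translation files."""
    with open(en_path, encoding="utf-8") as f_en, open(chr_path, encoding="utf-8") as f_chr:
        # zip() stops at the shorter file, so unequal line counts would
        # silently drop trailing sentences; the splits are expected to match.
        for idx, (line_en, line_chr) in enumerate(zip(f_en, f_chr)):
            yield idx, {"sentence_pair": {"en": line_en.strip(), "chr": line_chr.strip()}}


# Hypothetical usage with locally downloaded split files:
# for idx, example in read_parallel("train.en", "train.chr"):
#     print(example["sentence_pair"])
```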
 
monolingual/chr_en-chr.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85cf9248ea42b826588fb99fffbc4af7ea352c7b4220caafbd15e85c5f040048
+ size 405533
monolingual/chr_en-en10000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:855fe6054f2075d7254b3024a6e2a8d9e0dc3e68c91241d85b9a5720fe628170
+ size 892977
monolingual/chr_en-en100000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a31ef31f77fbd5f9ceb9cd93d0cccd56401d1f8927ddb15e068d7871a5517292
+ size 8950196
monolingual/chr_en-en20000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1d03d8fa0d04f11d0221b1fbfa0e0ea3c1761597903b0b88d7f087ce1d2cdb6
+ size 1791768
monolingual/chr_en-en5000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b5755f94bef9a7143daafcef61f92ccfc7f98a8df5d8fc38c1bdf62138e1b8b
+ size 453370
monolingual/chr_en-en50000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eccbaf837d22f1c04e86e74d89f3a826f1283d4d73778d3d0ad1a9ff79f321ed
+ size 4473814
monolingual_raw/chr_en-full.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4dfcad62b2b67c250ff3147be81acd688d920691cadf00a3ff74bf73eebfdf4b
+ size 410645
parallel/chr_en-dev.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:556cf812a66a64d074d95a31a674117c8ebc7ac7b8f38a5f8810fb2c7528a436
+ size 148799
parallel/chr_en-out_dev.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb5d3951487d725bc0b04088eee145574d8d610ec428e73ea1f754f6cad29778
+ size 46296
parallel/chr_en-out_test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4541249b01d4ac4cc9042adfae78440b3d0f1e6e589f0773d1c57203bdb08bb
+ size 48496
parallel/chr_en-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7255520f18eb4bceb963c77740cff19d9cc3850e22f3f4782ded6b41fc552948
+ size 150962
parallel/chr_en-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e293688571c0ef6d8f72f3c73af449295f8e6b7a5a3735ff6f136c7ddae7192d
+ size 1748708
parallel_raw/chr_en-full.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c47b924800fa384bb2c98b631dfe3088ea11684e3d17748f01c0aaa541583b58
+ size 2018725
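
The three-line files above are Git LFS pointers; the actual Parquet shards live in LFS storage. Once resolved, the shards can be read directly, without executing chr_en.py. A minimal sketch, assuming the `chr_en` dataset repo id and the `huggingface_hub` and `pandas` packages:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Resolve the LFS pointer to the real Parquet shard, then read it.
path = hf_hub_download(
    repo_id="chr_en",  # assumed dataset repo id
    filename="parallel/chr_en-train.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(df.head())
```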