albertvillanova (HF staff) committed
Commit 7fa5a02
Parent: c2f33d0

Convert dataset to Parquet (#1)

- Convert dataset to Parquet (3ef0b39da3649ece9b105c5d77d80de1d3934d0c)
- Add en_to_tr data files (24a3e76610e2c031cc9acaee85423e42a4f547ca)
- Add ku_to_tr data files (09dc4042a1b6a1f5968ed0cfc87143c54f75f31e)
- Delete loading script (8b476680a986ecc2a2bbfb8ed4347f90c162e9e2)
- Delete legacy dataset_infos.json (b6865e3797324fb032757efeffe8f311c879f9a2)
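
With the data now stored as Parquet and declared in the README's `configs` mapping (see the diff below), the dataset loads without running a loading script. A minimal sketch, assuming the canonical `bianet` repo id on the Hub and a recent `datasets` release:

from datasets import load_dataset

# Each config (en_to_ku, en_to_tr, ku_to_tr) maps to its own Parquet shard.
dataset = load_dataset("bianet", "en_to_ku", split="train")
print(dataset[0])  # {"id": "0", "translation": {"en": "...", "ku": "..."}}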

README.md CHANGED
@@ -21,6 +21,13 @@ task_categories:
 task_ids: []
 paperswithcode_id: bianet
 pretty_name: Bianet
+config_names:
+- en-to-ku
+- en-to-tr
+- en_to_ku
+- en_to_tr
+- ku-to-tr
+- ku_to_tr
 dataset_info:
 - config_name: en_to_ku
   features:
@@ -34,10 +41,10 @@ dataset_info:
         - ku
   splits:
   - name: train
-    num_bytes: 1800806
+    num_bytes: 1800794
     num_examples: 6402
-  download_size: 622420
-  dataset_size: 1800806
+  download_size: 1019265
+  dataset_size: 1800794
 - config_name: en_to_tr
   features:
   - name: id
@@ -50,10 +57,10 @@ dataset_info:
         - tr
   splits:
   - name: train
-    num_bytes: 10231043
+    num_bytes: 10230995
     num_examples: 34770
-  download_size: 3544116
-  dataset_size: 10231043
+  download_size: 5932117
+  dataset_size: 10230995
 - config_name: ku_to_tr
   features:
   - name: id
@@ -66,17 +73,23 @@ dataset_info:
         - tr
   splits:
   - name: train
-    num_bytes: 2086550
+    num_bytes: 2086538
     num_examples: 7325
-  download_size: 725227
-  dataset_size: 2086550
-config_names:
-- en-to-ku
-- en-to-tr
-- en_to_ku
-- en_to_tr
-- ku-to-tr
-- ku_to_tr
+  download_size: 1206133
+  dataset_size: 2086538
+configs:
+- config_name: en_to_ku
+  data_files:
+  - split: train
+    path: en_to_ku/train-*
+- config_name: en_to_tr
+  data_files:
+  - split: train
+    path: en_to_tr/train-*
+- config_name: ku_to_tr
+  data_files:
+  - split: train
+    path: ku_to_tr/train-*
 ---
 
 # Dataset Card for [Dataset Name]
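
Since the new `configs` section points each config at plain Parquet paths, the shards can also be read directly, for example with pandas over the `hf://` filesystem. A sketch, assuming pandas plus huggingface_hub are installed (for fsspec support) and the canonical `bianet` repo id:

import pandas as pd

# Read the single train shard of the en_to_tr config straight from the Hub.
df = pd.read_parquet("hf://datasets/bianet/en_to_tr/train-00000-of-00001.parquet")
print(len(df))  # 34770 rows, per the updated dataset_info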
bianet.py DELETED
@@ -1,148 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Bianet: A parallel news corpus in Turkish, Kurdish and English"""
-
-
-import os
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{ATAMAN18.6,
-  author = {Duygu Ataman},
-  title = {Bianet: A Parallel News Corpus in Turkish, Kurdish and English},
-  booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
-  year = {2018},
-  month = {may},
-  date = {7-12},
-  location = {Miyazaki, Japan},
-  editor = {Jinhua Du and Mihael Arcan and Qun Liu and Hitoshi Isahara},
-  publisher = {European Language Resources Association (ELRA)},
-  address = {Paris, France},
-  isbn = {979-10-95546-15-3},
-  language = {english}
-}"""
-
-_HOMEPAGE = "http://opus.nlpl.eu/Bianet.php"
-
-
-_LICENSE = "CC-BY-SA-4.0"
-
-_VALID_LANGUAGE_PAIRS = {
-    ("en", "ku"): "http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-ku.txt.zip",
-    ("en", "tr"): "http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-tr.txt.zip",
-    ("ku", "tr"): "http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/ku-tr.txt.zip",
-}
-
-_VERSION = "1.0.0"
-
-_DESCRIPTION = """\
-A parallel news corpus in Turkish, Kurdish and English.
-Bianet collects 3,214 Turkish articles with their sentence-aligned Kurdish or English translations from the Bianet online newspaper.
-3 languages, 3 bitexts
-total number of files: 6
-total number of tokens: 2.25M
-total number of sentence fragments: 0.14M
-"""
-
-_BASE_NAME = "Bianet.{}-{}.{}"
-
-
-class BianetConfig(datasets.BuilderConfig):
-    """BuilderConfig for Bianet: A parallel news corpus in Turkish, Kurdish and English"""
-
-    def __init__(self, language_pair=(None, None), **kwargs):
-        """BuilderConfig for Bianet: A parallel news corpus in Turkish, Kurdish and English.
-        The config name is built from the two language codes in `language_pair`,
-        joined by `_to_` (e.g. "en_to_tr").
-        Args:
-            language_pair: pair of languages that will be used for translation.
-            **kwargs: keyword arguments forwarded to super.
-        """
-        name = "%s_to_%s" % (language_pair[0], language_pair[1])
-
-        description = ("Translation dataset from %s to %s or %s to %s.") % (
-            language_pair[0],
-            language_pair[1],
-            language_pair[1],
-            language_pair[0],
-        )
-        super(BianetConfig, self).__init__(
-            name=name, description=description, version=datasets.Version(_VERSION, ""), **kwargs
-        )
-
-        # Validate language pair.
-        assert language_pair in _VALID_LANGUAGE_PAIRS, (
-            "Config language pair (%s, %s) not supported"
-        ) % language_pair
-
-        self.language_pair = language_pair
-
-
-class Bianet(datasets.GeneratorBasedBuilder):
-
-    BUILDER_CONFIGS = [
-        BianetConfig(
-            language_pair=pair,
-        )
-        for pair in _VALID_LANGUAGE_PAIRS.keys()
-    ]
-
-    BUILDER_CONFIG_CLASS = BianetConfig
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "translation": datasets.Translation(languages=tuple(self.config.language_pair)),
-                },
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        download_url = _VALID_LANGUAGE_PAIRS.get(tuple(self.config.language_pair))
-        path = dl_manager.download_and_extract(download_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": path},
-            )
-        ]
-
-    def _generate_examples(self, datapath):
-        lang1, lang2 = self.config.language_pair
-        lang1_file = _BASE_NAME.format(lang1, lang2, lang1)
-        lang2_file = _BASE_NAME.format(lang1, lang2, lang2)
-        lang1_path = os.path.join(datapath, lang1_file)
-        lang2_path = os.path.join(datapath, lang2_file)
-
-        with open(lang1_path, encoding="utf-8") as f1, open(lang2_path, encoding="utf-8") as f2:
-            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
-                x = x.strip()
-                y = y.strip()
-                result = (
-                    sentence_counter,
-                    {
-                        "id": str(sentence_counter),
-                        "translation": {lang1: x, lang2: y},
-                    },
-                )
-                yield result
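
For reference, the core pairing logic of the deleted script can be reproduced standalone. A minimal sketch over the OPUS Moses files (assumes a local extraction of, e.g., en-ku.txt.zip, which contains Bianet.en-ku.en and Bianet.en-ku.ku):

def read_pairs(lang1_path, lang2_path, lang1, lang2):
    """Yield sentence-aligned examples in the same shape the script produced."""
    with open(lang1_path, encoding="utf-8") as f1, open(lang2_path, encoding="utf-8") as f2:
        for i, (x, y) in enumerate(zip(f1, f2)):
            yield {"id": str(i), "translation": {lang1: x.strip(), lang2: y.strip()}}

for example in read_pairs("Bianet.en-ku.en", "Bianet.en-ku.ku", "en", "ku"):
    print(example)
    break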
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"en_to_ku": {"description": "A parallel news corpus in Turkish, Kurdish and English.\nBianet collects 3,214 Turkish articles with their sentence-aligned Kurdish or English translations from the Bianet online newspaper.\n3 languages, 3 bitexts\ntotal number of files: 6\ntotal number of tokens: 2.25M\ntotal number of sentence fragments: 0.14M\n", "citation": "Ataman, D. (2018) Bianet: A Parallel News Corpus in Turkish, Kurdish and English. In Proceedings of the LREC 2018 Workshop MLP-Moment. pp. 14-17. pdf", "homepage": "http://opus.nlpl.eu/Bianet.php", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "translation": {"languages": ["en", "ku"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "builder_name": "bianet", "config_name": "en_to_ku", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1800806, "num_examples": 6402, "dataset_name": "bianet"}}, "download_checksums": {"http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-ku.txt.zip": {"num_bytes": 622420, "checksum": "04238f916be585e426318eb486ecaa00c9800bdd219bd47216bbe1cb732bcde5"}}, "download_size": 622420, "post_processing_size": null, "dataset_size": 1800806, "size_in_bytes": 2423226}, "en_to_tr": {"description": "A parallel news corpus in Turkish, Kurdish and English.\nBianet collects 3,214 Turkish articles with their sentence-aligned Kurdish or English translations from the Bianet online newspaper.\n3 languages, 3 bitexts\ntotal number of files: 6\ntotal number of tokens: 2.25M\ntotal number of sentence fragments: 0.14M\n", "citation": "Ataman, D. (2018) Bianet: A Parallel News Corpus in Turkish, Kurdish and English. In Proceedings of the LREC 2018 Workshop MLP-Moment. pp. 14-17. pdf", "homepage": "http://opus.nlpl.eu/Bianet.php", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "translation": {"languages": ["en", "tr"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "builder_name": "bianet", "config_name": "en_to_tr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10231043, "num_examples": 34770, "dataset_name": "bianet"}}, "download_checksums": {"http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-tr.txt.zip": {"num_bytes": 3544116, "checksum": "08f0857457438be03e4e6f6359cac99522fe43d99daa92d49cec0d90fa0c68d4"}}, "download_size": 3544116, "post_processing_size": null, "dataset_size": 10231043, "size_in_bytes": 13775159}, "ku_to_tr": {"description": "A parallel news corpus in Turkish, Kurdish and English.\nBianet collects 3,214 Turkish articles with their sentence-aligned Kurdish or English translations from the Bianet online newspaper.\n3 languages, 3 bitexts\ntotal number of files: 6\ntotal number of tokens: 2.25M\ntotal number of sentence fragments: 0.14M\n", "citation": "Ataman, D. (2018) Bianet: A Parallel News Corpus in Turkish, Kurdish and English. In Proceedings of the LREC 2018 Workshop MLP-Moment. pp. 14-17. pdf", "homepage": "http://opus.nlpl.eu/Bianet.php", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "translation": {"languages": ["ku", "tr"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "builder_name": "bianet", "config_name": "ku_to_tr", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2086550, "num_examples": 7325, "dataset_name": "bianet"}}, "download_checksums": {"http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/ku-tr.txt.zip": {"num_bytes": 725227, "checksum": "daf8ecaf9af71b1466d95703ffd335a8cc28578afb82e2d305eaaa56b1e2a12a"}}, "download_size": 725227, "post_processing_size": null, "dataset_size": 2086550, "size_in_bytes": 2811777}}
en_to_ku/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1c2d56a547eed9ba036f3480888a0ae0295d4bc5c78f3551da71cc638447b0d
+size 1019265
en_to_tr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5591d4132302a27de22e3f5d7f2f671465915b1e1f8bea8015731a9ee783e264
+size 5932117
ku_to_tr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66e2be325d5b68463412265685096ca905f33bbf103f55327427ae9a2c135066
+size 1206133
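
The added files are Git LFS pointers: the Parquet blobs themselves are addressed by the oid (SHA-256) and size fields above. A downloaded shard can be checked against its pointer with a short sketch (the local filename here is hypothetical):

import hashlib

def verify_lfs_pointer(blob_path, expected_oid, expected_size):
    # Compare the blob's byte length and SHA-256 digest to the pointer fields.
    data = open(blob_path, "rb").read()
    assert len(data) == expected_size, "size mismatch"
    assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"

verify_lfs_pointer(
    "train-00000-of-00001.parquet",  # local copy of the ku_to_tr shard
    "66e2be325d5b68463412265685096ca905f33bbf103f55327427ae9a2c135066",
    1206133,
)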