albertvillanova (HF staff) committed
Commit 81f6813
1 parent: 8b6c710

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (172cd31f93dacfda9bf86f4c626b6d01f99653b2)
- Delete loading script (88eed5db6680341672d438ab278cf605abdc54e6)
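After this change the dataset is served straight from the Parquet shard, so it can be loaded without executing any dataset script. A minimal sketch, assuming the repository is reachable under the id `opus_tedtalks` (the full repo path is not shown in this commit view):

```python
from datasets import load_dataset

# Hypothetical repo id -- adjust to the actual dataset repository on the Hub.
ds = load_dataset("opus_tedtalks", "en-hr", split="train")

print(ds.num_rows)  # 86348 examples, per the updated dataset card
print(ds[0])        # {"id": "0", "translation": {"en": "...", "hr": "..."}}
```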

README.md CHANGED
@@ -17,9 +17,9 @@ source_datasets:
 task_categories:
 - translation
 task_ids: []
-paperswithcode_id: null
 pretty_name: OpusTedtalks
 dataset_info:
+  config_name: en-hr
   features:
   - name: id
     dtype: string
@@ -29,13 +29,17 @@ dataset_info:
         languages:
         - en
         - hr
-  config_name: en-hr
   splits:
   - name: train
-    num_bytes: 15249417
+    num_bytes: 15249309
     num_examples: 86348
-  download_size: 5639306
-  dataset_size: 15249417
+  download_size: 9932158
+  dataset_size: 15249309
+configs:
+- config_name: en-hr
+  data_files:
+  - split: train
+    path: en-hr/train-*
 ---
 
 # Dataset Card for OpusTedtalks
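The new `configs` block maps the `en-hr` config to the Parquet shard via a `data_files` glob, and the `dataset_info` sizes now describe the Parquet-backed data. A small sketch of inspecting that metadata without downloading the data, again assuming the `opus_tedtalks` repo id:

```python
from datasets import load_dataset_builder

# Hypothetical repo id; the values below come from the updated README YAML.
builder = load_dataset_builder("opus_tedtalks", "en-hr")

info = builder.info
print(info.splits["train"].num_examples)  # 86348
print(info.dataset_size)                  # 15249309 bytes once loaded
print(info.download_size)                 # 9932158 bytes (the Parquet shard)
```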
en-hr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17eaa0f05b079e94d2ea39186f44369a879bd2fec48dbd8d1dbb902bb2455cce
+size 9932158
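The added file is a Git LFS pointer to a single Parquet shard of about 9.9 MB. If needed, the shard can also be read directly, outside of `datasets`; a sketch assuming the same hypothetical repo id:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Hypothetical repo id; only the relative file path is shown in this commit.
path = hf_hub_download(
    repo_id="opus_tedtalks",
    repo_type="dataset",
    filename="en-hr/train-00000-of-00001.parquet",
)

df = pd.read_parquet(path)
print(df.shape)          # expected (86348, 2)
print(list(df.columns))  # expected: ['id', 'translation']
```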
opus_tedtalks.py DELETED
@@ -1,121 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-import os
-
-import datasets
-
-
-_DESCRIPTION = """\
-This is a Croatian-English parallel corpus of transcribed and translated TED talks, originally extracted from https://wit3.fbk.eu. The corpus is compiled by Željko Agić and is taken from http://lt.ffzg.hr/zagic provided under the CC-BY-NC-SA license.
-2 languages, total number of files: 2
-total number of tokens: 2.81M
-total number of sentence fragments: 0.17M
-"""
-_HOMEPAGE_URL = "http://opus.nlpl.eu/TedTalks.php"
-_CITATION = """\
-@InProceedings{TIEDEMANN12.463,
-  author = {J{\"o}rg Tiedemann},
-  title = {Parallel Data, Tools and Interfaces in OPUS},
-  booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
-  year = {2012},
-  month = {may},
-  date = {23-25},
-  address = {Istanbul, Turkey},
-  editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
-  publisher = {European Language Resources Association (ELRA)},
-  isbn = {978-2-9517408-7-7},
-  language = {english}
-}
-"""
-
-_VERSION = "1.0.0"
-_BASE_NAME = "TedTalks.{}.{}"
-_BASE_URL = "https://object.pouta.csc.fi/OPUS-TedTalks/v1/moses/{}-{}.txt.zip"
-_LANGUAGE_PAIRS = [
-    ("en", "hr"),
-]
-
-
-class TedTalksConfig(datasets.BuilderConfig):
-    def __init__(self, *args, lang1=None, lang2=None, **kwargs):
-        super().__init__(
-            *args,
-            name=f"{lang1}-{lang2}",
-            **kwargs,
-        )
-        self.lang1 = lang1
-        self.lang2 = lang2
-
-
-class OpusTedtalks(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        TedTalksConfig(
-            lang1=lang1,
-            lang2=lang2,
-            description=f"Translating {lang1} to {lang2} or vice versa",
-            version=datasets.Version(_VERSION),
-        )
-        for lang1, lang2 in _LANGUAGE_PAIRS
-    ]
-    BUILDER_CONFIG_CLASS = TedTalksConfig
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
-                },
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE_URL,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        def _base_url(lang1, lang2):
-            return _BASE_URL.format(lang1, lang2)
-
-        download_url = _base_url(self.config.lang1, self.config.lang2)
-        path = dl_manager.download_and_extract(download_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": path},
-            )
-        ]
-
-    def _generate_examples(self, datapath):
-        l1, l2 = self.config.lang1, self.config.lang2
-        folder = l1 + "-" + l2
-        l1_file = _BASE_NAME.format(folder, l1)
-        l2_file = _BASE_NAME.format(folder, l2)
-        l1_path = os.path.join(datapath, l1_file)
-        l2_path = os.path.join(datapath, l2_file)
-        with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
-            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
-                x = x.strip()
-                y = y.strip()
-                result = (
-                    sentence_counter,
-                    {
-                        "id": str(sentence_counter),
-                        "translation": {l1: x, l2: y},
-                    },
-                )
-                yield result
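For reference, the OPUS archive the removed script downloaded is still available, and its line-aligned Moses files can be paired the same way the deleted `_generate_examples` did. A minimal sketch using only the standard library; the URL and member names follow `_BASE_URL` and `_BASE_NAME` from the script above:

```python
import io
import urllib.request
import zipfile

# URL built from _BASE_URL with the en-hr pair, as the deleted loader did.
URL = "https://object.pouta.csc.fi/OPUS-TedTalks/v1/moses/en-hr.txt.zip"

with urllib.request.urlopen(URL) as resp:
    archive = zipfile.ZipFile(io.BytesIO(resp.read()))

# Member names follow _BASE_NAME ("TedTalks.{pair}.{lang}").
with archive.open("TedTalks.en-hr.en") as f_en, archive.open("TedTalks.en-hr.hr") as f_hr:
    for i, (en, hr) in enumerate(zip(f_en, f_hr)):
        example = {
            "id": str(i),
            "translation": {
                "en": en.decode("utf-8").strip(),
                "hr": hr.decode("utf-8").strip(),
            },
        }
        if i < 3:
            print(example)
        else:
            break
```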