Commit e011134 by albertvillanova (1 parent: 1aaf3a5)

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (f5f3944189cf55b94575db9a03a249b6afab9485)
- Delete loading script (1ea98f4ae60f9e2506c251553f69e64ed6caa636)
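With the data stored as Parquet, the dataset loads without executing any repository code. A minimal sketch, assuming the dataset is hosted under the repository id `hrenwac_para` (the config name `hrenWaC` comes from the card metadata in the diff below):

```python
from datasets import load_dataset

# The repository id is an assumption; replace it with the actual dataset repo.
ds = load_dataset("hrenwac_para", "hrenWaC", split="train")

example = ds[0]
print(example["translation"]["en"])
print(example["translation"]["hr"])
```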

README.md CHANGED
@@ -17,9 +17,9 @@ source_datasets:
 task_categories:
 - translation
 task_ids: []
-paperswithcode_id: null
 pretty_name: HrenwacPara
 dataset_info:
+  config_name: hrenWaC
   features:
   - name: translation
     dtype:
@@ -27,13 +27,18 @@ dataset_info:
         languages:
         - en
         - hr
-  config_name: hrenWaC
   splits:
   - name: train
-    num_bytes: 29602110
+    num_bytes: 29602030
     num_examples: 99001
-  download_size: 11640281
-  dataset_size: 29602110
+  download_size: 19255300
+  dataset_size: 29602030
+configs:
+- config_name: hrenWaC
+  data_files:
+  - split: train
+    path: hrenWaC/train-*
+  default: true
 ---
 
 # Dataset Card for hrenwac_para
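The new `configs` block maps the `hrenWaC` config to the Parquet shards under `hrenWaC/train-*`, which also makes the data readable with plain pandas over the Hub filesystem. A sketch, assuming the repository path `datasets/hrenwac_para` (adjust to the real repo id) and that `huggingface_hub` is installed to provide the `hf://` filesystem:

```python
import pandas as pd

# hf:// paths are resolved by huggingface_hub's fsspec filesystem.
# The repository id below is an assumption.
df = pd.read_parquet("hf://datasets/hrenwac_para/hrenWaC/train-00000-of-00001.parquet")

print(len(df))                   # 99001 rows per the card metadata
print(df.loc[0, "translation"])  # {'en': '...', 'hr': '...'}
```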
hrenWaC/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adba795bfffdf805f3a0e4c619440442b0cdc95654d967edc86c4f8ead49f675
+size 19255300
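The Parquet shard itself is stored through Git LFS, so the repository only tracks this three-line pointer: the spec version, the SHA-256 of the blob, and its size in bytes. A short check of a locally downloaded shard against the pointer, assuming the file sits in the working directory:

```python
import hashlib
from pathlib import Path

# Hypothetical local copy of the shard referenced by the LFS pointer above.
shard = Path("train-00000-of-00001.parquet")

digest = hashlib.sha256(shard.read_bytes()).hexdigest()
print(digest)                # should equal the oid in the pointer
print(shard.stat().st_size)  # should equal "size 19255300"
```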
hrenwac_para.py DELETED
@@ -1,91 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Croatian-English parallel corpus hrenWaC"""
-
-import datasets
-
-
-_CITATION = """
-@misc{11356/1058,
- title = {Croatian-English parallel corpus {hrenWaC} 2.0},
- author = {Ljube{\v s}i{\'c}, Nikola and Espl{\'a}-Gomis, Miquel and Ortiz Rojas, Sergio and Klubi{\v c}ka, Filip and Toral, Antonio},
- url = {http://hdl.handle.net/11356/1058},
- note = {Slovenian language resource repository {CLARIN}.{SI}},
- copyright = {{CLARIN}.{SI} User Licence for Internet Corpora},
- year = {2016} }
-"""
-
-_DESCRIPTION = """
-The hrenWaC corpus version 2.0 consists of parallel Croatian-English texts crawled from the .hr top-level domain for Croatia.
-The corpus was built with Spidextor (https://github.com/abumatran/spidextor), a tool that glues together the output of SpiderLing used for crawling and Bitextor used for bitext extraction. The accuracy of the extracted bitext on the segment level is around 80% and on the word level around 84%.
-"""
-
-_LICENSE = "CC BY-SA 3.0"
-
-_HOMEPAGE = "http://nlp.ffzg.hr/resources/corpora/hrenwac/"
-_URLS = "http://nlp.ffzg.hr/data/corpora/hrenwac/hrenwac.en-hr.txt.gz"
-
-
-class HrenwacPara(datasets.GeneratorBasedBuilder):
-    """Croatian-English parallel corpus hrenWaC"""
-
-    VERSION = datasets.Version("1.0.0")
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="hrenWaC",
-            version=VERSION,
-            description="The hrenWaC dataset.",
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features({"translation": datasets.features.Translation(languages=("en", "hr"))}),
-            supervised_keys=("en", "hr"),
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        downloaded_file = dl_manager.download_and_extract({"train": _URLS})
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": downloaded_file["train"],
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        with open(filepath, encoding="utf8") as f:
-            en = ""
-            hr = ""
-            i = -1
-            for id_, row in enumerate(f):
-                if id_ % 3 == 0:
-                    en = row.strip()
-                if id_ % 3 == 1:
-                    hr = row.strip()
-                if id_ % 3 == 2:
-                    i = i + 1
-                    yield i, {
-                        "translation": {
-                            "en": en,
-                            "hr": hr,
-                        }
-                    }
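The deleted script parsed a gzipped text file in which every record spans three lines (English segment, Croatian segment, separator) and yielded one `translation` dict per record. The sketch below reproduces that parsing and writes the result to Parquet; it is not the exact conversion job run on the Hub, just an illustration of the same modulo-3 logic feeding a Parquet writer (requires pyarrow as the pandas Parquet engine):

```python
import gzip
from pathlib import Path

import pandas as pd

SOURCE = "hrenwac.en-hr.txt.gz"  # the file _URLS pointed at, downloaded locally
OUT = Path("hrenWaC")            # shard layout used by the converted repo
OUT.mkdir(exist_ok=True)

rows = []
with gzip.open(SOURCE, "rt", encoding="utf8") as f:
    en = hr = ""
    for i, line in enumerate(f):
        if i % 3 == 0:      # first line of a record: English segment
            en = line.strip()
        elif i % 3 == 1:    # second line: Croatian segment
            hr = line.strip()
        else:               # third line: separator; emit the pair
            rows.append({"translation": {"en": en, "hr": hr}})

# pyarrow infers a struct<en, hr> column from the dicts, matching the card's features.
pd.DataFrame(rows).to_parquet(OUT / "train-00000-of-00001.parquet", index=False)
```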