holylovenia commited on
Commit
2f1be48
1 Parent(s): 1cc8f7f

Upload paracotta_id.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. paracotta_id.py +139 -0
paracotta_id.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Dict, List, Tuple
4
+
5
+ import datasets
6
+
7
+ from nusacrowd.utils.configs import NusantaraConfig
8
+ from nusacrowd.utils.constants import Tasks
9
+ from nusacrowd.utils import schemas
10
+ import jsonlines
11
+ from nltk.tokenize.treebank import TreebankWordDetokenizer
12
+
13
+
14
# BibTeX citation for the ParaCotta paper (Aji et al., 2022).
_CITATION = """\
@article{aji2022paracotta,
title={ParaCotta: Synthetic Multilingual Paraphrase Corpora from the Most Diverse Translation Sample Pair},
author={Aji, Alham Fikri and Fatyanosa, Tirana Noor and Prasojo, Radityo Eko and Arthur, Philip and Fitriany, Suci and Qonitah, Salma and Zulfa, Nadhifa and Santoso, Tomi and Data, Mahendra},
journal={arXiv preprint arXiv:2205.04651},
year={2022}
}
"""

_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False  # Data is downloaded from a remote URL, not shipped locally.

_DATASETNAME = "paracotta_id"

_DESCRIPTION = """\
ParaCotta is a synthetic parallel paraphrase corpus across 17 languages: Arabic, Catalan, Czech, German, English, Spanish, Estonian, French, Hindi, Indonesian, Italian, Dutch, Ro- manian, Russian, Swedish, Vietnamese, and Chinese.
"""

_HOMEPAGE = "https://github.com/afaji/paracotta-paraphrase"

_LICENSE = "Unknown"

# Google Drive link to the Indonesian paraphrase TSV file.
_URLS = {
    _DATASETNAME: "https://drive.google.com/uc?id=1QPyD4lOKxbXGUypA5ke6Y9_i9utq-QSQ",
}

_SUPPORTED_TASKS = [Tasks.PARAPHRASING]

# Dataset does not have versioning
_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"
45
+
46
+
47
class ParaCotta(datasets.GeneratorBasedBuilder):
    """ParaCotta is a synthetic parallel paraphrase corpus across 17 languages: Arabic, Catalan, Czech, German, English, Spanish, Estonian, French, Hindi, Indonesian, Italian, Dutch, Ro- manian, Russian, Swedish, Vietnamese, and Chinese.

    This builder loads the Indonesian (ind) portion as a single TRAIN split,
    exposed in two schemas: the raw "source" schema (id/src/tgt) and the
    Nusantara text-to-text schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="paracotta_id_source",
            version=SOURCE_VERSION,
            description="paracotta_id source schema",
            schema="source",
            subset_id="paracotta_id",
        ),
        NusantaraConfig(
            name="paracotta_id_nusantara_t2t",
            version=NUSANTARA_VERSION,
            description="paracotta_id Nusantara schema",
            schema="nusantara_t2t",
            subset_id="paracotta_id",
        ),
    ]

    DEFAULT_CONFIG_NAME = "paracotta_id_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with the feature spec of the active schema.

        Raises:
            ValueError: if the config declares an unknown schema (previously
                this fell through and crashed with UnboundLocalError).
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "src": datasets.Value("string"),
                    "tgt": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_t2t":
            features = schemas.text2text_features
        else:
            raise ValueError(f"Invalid config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators.

        The corpus ships as a single file, exposed here as the TRAIN split
        (the upstream file is labelled "test" — kept as-is for parity).
        """
        urls = _URLS[_DATASETNAME]
        data_dir = Path(dl_manager.download(urls))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs from the tab-separated corpus file.

        Each line is expected to carry at least three tab-separated columns;
        columns 1 and 2 are the source and target paraphrase (column 0 is
        presumably a row identifier — TODO confirm against the raw file).

        Fixes over the original implementation:
        - the counter no longer shadows the builtin ``id``;
        - the yielded key now matches the example's ``id`` field (the old
          code incremented before yielding, so keys were off by one);
        - ``id`` is cast to ``str`` to match the declared string feature;
        - the file is streamed line-by-line instead of ``readlines()``,
          and each line is split only once.
        """
        if self.config.schema not in ("source", "nusantara_t2t"):
            raise ValueError(f"Invalid config: {self.config.name}")

        with open(filepath, "r") as f:
            for idx, line in enumerate(f):
                cols = line.rstrip("\n").split("\t")
                src, tgt = cols[1], cols[2]
                if self.config.schema == "source":
                    yield idx, {
                        "id": str(idx),
                        "src": src,
                        "tgt": tgt,
                    }
                else:
                    yield idx, {
                        "id": str(idx),
                        "text_1": src,
                        "text_2": tgt,
                        "text_1_name": "src",
                        "text_2_name": "tgt",
                    }