Datasets: cmu_hinglish_dog

Modalities: Tabular, Text
Formats: parquet
Libraries: Datasets, pandas
albertvillanova (HF staff) committed
Commit 01bd059
1 Parent(s): c29c92d

Delete loading script
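With the dedicated loading script removed, the data is served directly from the repository's Parquet files (see Formats above) and loads through the library's generic Hub loader. A minimal sketch, assuming the Hub repository id matches the deleted script's name (cmu_hinglish_dog):

from datasets import load_dataset

# Repo id below is an assumption inferred from the deleted file's name.
ds = load_dataset("cmu_hinglish_dog")
print(ds)                             # DatasetDict with train/validation/test splits
print(ds["train"][0]["translation"])  # {"en": "...", "hi_en": "..."}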

Files changed (1)
  1. cmu_hinglish_dog.py +0 -190
cmu_hinglish_dog.py DELETED
@@ -1,190 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import os
-import re
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{cmu_dog_emnlp18,
-    title={A Dataset for Document Grounded Conversations},
-    author={Zhou, Kangyan and Prabhumoye, Shrimai and Black, Alan W},
-    year={2018},
-    booktitle={Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing}
-}
-
-@inproceedings{khanuja-etal-2020-gluecos,
-    title = "{GLUEC}o{S}: An Evaluation Benchmark for Code-Switched {NLP}",
-    author = "Khanuja, Simran and
-      Dandapat, Sandipan and
-      Srinivasan, Anirudh and
-      Sitaram, Sunayana and
-      Choudhury, Monojit",
-    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
-    month = jul,
-    year = "2020",
-    address = "Online",
-    publisher = "Association for Computational Linguistics",
-    url = "https://www.aclweb.org/anthology/2020.acl-main.329",
-    pages = "3575--3585"
-}
-"""
-
-_DESCRIPTION = """\
-This is a collection of text conversations in Hinglish (code mixing between Hindi-English) and their corresponding English only versions. Can be used for Translating between the two.
-"""
-
-_HOMEPAGE = "http://festvox.org/cedar/data/notyet/"
-_URL_HINGLISH = "http://festvox.org/cedar/data/notyet/CMUHinglishDoG.zip"
-# From: https://github.com/festvox/datasets-CMU_DoG/archive/master/Conversations.zip
-_URL_ENGLISH = "data-english.zip"
-
-
-class CMUHinglishDoG(datasets.GeneratorBasedBuilder):
-    """Load the CMU Hinglish DoG Data for MT"""
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "date": datasets.Value("string"),
-                "docIdx": datasets.Value("int64"),
-                "translation": datasets.Translation(languages=["en", "hi_en"]),
-                "uid": datasets.Value("string"),
-                "utcTimestamp": datasets.Value("string"),
-                "rating": datasets.Value("int64"),
-                "status": datasets.Value("int64"),
-                "uid1LogInTime": datasets.Value("string"),
-                "uid1LogOutTime": datasets.Value("string"),
-                "uid1response": {
-                    "response": datasets.Sequence(datasets.Value("int64")),
-                    "type": datasets.Value("string"),
-                },
-                "uid2response": {
-                    "response": datasets.Sequence(datasets.Value("int64")),
-                    "type": datasets.Value("string"),
-                },
-                "user2_id": datasets.Value("string"),
-                "whoSawDoc": datasets.Sequence(datasets.Value("string")),
-                "wikiDocumentIdx": datasets.Value("int64"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """The linking part between Hinglish data and English data is inspired from the implementation in GLUECoS.
-        Refer here for the original script https://github.com/microsoft/GLUECoS/blob/7fdc51653e37a32aee17505c47b7d1da364fa77e/Data/Preprocess_Scripts/preprocess_mt_en_hi.py"""
-
-        eng_path = dl_manager.download_and_extract(_URL_ENGLISH)
-        data_dir_en = os.path.join(eng_path, "Conversations")
-
-        hi_en_path = dl_manager.download_and_extract(_URL_HINGLISH)
-        data_dir_hi_en = os.path.join(hi_en_path, "CMUHinglishDoG", "Conversations_Hinglish")
-
-        hi_en_dirs = {
-            "train": os.path.join(data_dir_hi_en, "train"),
-            "valid": os.path.join(data_dir_hi_en, "valid"),
-            "test": os.path.join(data_dir_hi_en, "test"),
-        }
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "hi_en_dir": hi_en_dirs["train"],
-                    "data_dir_en": data_dir_en,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "hi_en_dir": hi_en_dirs["test"],
-                    "data_dir_en": data_dir_en,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "hi_en_dir": hi_en_dirs["valid"],
-                    "data_dir_en": data_dir_en,
-                },
-            ),
-        ]
-
-    def _generate_examples(self, hi_en_dir, data_dir_en):
-        """Yields examples."""
-        english_files_train = os.listdir(os.path.join(data_dir_en, "train"))
-        english_files_val = os.listdir(os.path.join(data_dir_en, "valid"))
-        english_files_test = os.listdir(os.path.join(data_dir_en, "test"))
-
-        hinglish_files = os.listdir(hi_en_dir)
-        key = 0
-        for f in hinglish_files:
-            en_file_path = f.split(".json")[0] + ".json"
-            found = True
-            # Looks for the corresponding english file in all 3 splits
-            if en_file_path in english_files_train:
-                en = json.load(open(os.path.join(os.path.join(data_dir_en, "train"), en_file_path)))
-            elif en_file_path in english_files_val:
-                en = json.load(open(os.path.join(os.path.join(data_dir_en, "valid"), en_file_path)))
-            elif en_file_path in english_files_test:
-                en = json.load(open(os.path.join(os.path.join(data_dir_en, "test"), en_file_path)))
-            else:
-                found = False
-            if found:
-                hi_en = json.load(open(os.path.join(hi_en_dir, f)))
-
-                assert len(en["history"]) == len(hi_en["history"])
-
-                for x, y in zip(en["history"], hi_en["history"]):
-                    assert x["docIdx"] == y["docIdx"]
-                    assert x["uid"] == y["uid"]
-                    assert x["utcTimestamp"] == y["utcTimestamp"]
-
-                    x["text"] = re.sub("\t|\n", " ", x["text"])
-                    y["text"] = re.sub("\t|\n", " ", y["text"])
-                    line = {
-                        "date": hi_en["date"],
-                        "uid": x["uid"],
-                        "docIdx": x["docIdx"],
-                        "utcTimestamp": x["utcTimestamp"],
-                        "translation": {"hi_en": y["text"], "en": x["text"]},
-                        "rating": hi_en["rating"],
-                        "status": hi_en["status"],
-                        "uid1LogOutTime": hi_en.get("uid1LogOutTime"),
-                        "uid1LogInTime": hi_en["uid1LogInTime"],
-                        "uid1response": {
-                            "response": hi_en["uid1response"]["response"] if "uid1response" in hi_en else [],
-                            "type": hi_en["uid1response"]["type"] if "uid1response" in hi_en else None,
-                        },
-                        "uid2response": {
-                            "response": hi_en["uid2response"]["response"] if "uid2response" in hi_en else [],
-                            "type": hi_en["uid2response"]["type"] if "uid2response" in hi_en else None,
-                        },
-                        "user2_id": hi_en["user2_id"],
-                        "whoSawDoc": hi_en["whoSawDoc"],
-                        "wikiDocumentIdx": hi_en["wikiDocumentIdx"],
-                    }
-
-                    yield key, line
-                    key += 1
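
For anyone who still needs to pair the raw English CMU DoG conversations with their Hinglish counterparts without the deleted builder, the same filename-based alignment can be reproduced standalone. A condensed sketch of that logic, with the two directory paths as placeholders:

import json
import os
import re

def hinglish_english_pairs(hi_en_dir, data_dir_en):
    """Yield {"en": ..., "hi_en": ...} turn pairs, mirroring the deleted _generate_examples."""
    # English conversations sit in three split folders; Hinglish files are matched by file name.
    en_splits = {s: set(os.listdir(os.path.join(data_dir_en, s))) for s in ("train", "valid", "test")}
    for fname in os.listdir(hi_en_dir):
        split = next((s for s, files in en_splits.items() if fname in files), None)
        if split is None:
            continue  # no English counterpart found in any split
        with open(os.path.join(data_dir_en, split, fname)) as f:
            en = json.load(f)
        with open(os.path.join(hi_en_dir, fname)) as f:
            hi_en = json.load(f)
        assert len(en["history"]) == len(hi_en["history"])
        for x, y in zip(en["history"], hi_en["history"]):
            yield {
                "en": re.sub(r"[\t\n]", " ", x["text"]),
                "hi_en": re.sub(r"[\t\n]", " ", y["text"]),
            }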