albertvillanova committed
Commit 19796c6
1 parent: b7d0577

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (c29c92db214250cfc9cd1dbd6c9d39326bbfe5d5)
- Delete loading script (01bd059da26164e7f76995b65857e975b025fd99)

README.md CHANGED
@@ -12,7 +12,6 @@ license:
 multilinguality:
 - multilingual
 - translation
-pretty_name: CMU Document Grounded Conversations
 size_categories:
 - 1K<n<10K
 source_datasets:
@@ -20,6 +19,7 @@ source_datasets:
 task_categories:
 - translation
 task_ids: []
+pretty_name: CMU Document Grounded Conversations
 dataset_info:
   features:
   - name: date
@@ -64,16 +64,25 @@ dataset_info:
     dtype: int64
   splits:
   - name: train
-    num_bytes: 3142398
+    num_bytes: 3140818
     num_examples: 8060
   - name: test
-    num_bytes: 379521
+    num_bytes: 379465
     num_examples: 960
   - name: validation
-    num_bytes: 368726
+    num_bytes: 368670
     num_examples: 942
-  download_size: 8749685
-  dataset_size: 3890645
+  download_size: 1039828
+  dataset_size: 3888953
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
+  - split: validation
+    path: data/validation-*
 ---
 
 # Dataset Card for CMU Document Grounded Conversations
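The new `configs` entry maps each split to a Parquet shard under `data/`. As an illustrative sketch (assuming the repository id `cmu_hinglish_dog` and that `huggingface_hub` is installed so pandas can resolve `hf://` paths), a shard can also be read directly:

```python
import pandas as pd

# Hypothetical repository id; the shard name matches the file added below.
df = pd.read_parquet(
    "hf://datasets/cmu_hinglish_dog/data/train-00000-of-00001.parquet"
)
print(df.columns.tolist())  # expect date, docIdx, translation, uid, ...
print(len(df))              # expect 8060 rows per the split metadata
```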
cmu_hinglish_dog.py DELETED
@@ -1,190 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import json
17
- import os
18
- import re
19
-
20
- import datasets
21
-
22
-
23
- _CITATION = """\
24
- @inproceedings{cmu_dog_emnlp18,
25
- title={A Dataset for Document Grounded Conversations},
26
- author={Zhou, Kangyan and Prabhumoye, Shrimai and Black, Alan W},
27
- year={2018},
28
- booktitle={Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing}
29
- }
30
-
31
- @inproceedings{khanuja-etal-2020-gluecos,
32
- title = "{GLUEC}o{S}: An Evaluation Benchmark for Code-Switched {NLP}",
33
- author = "Khanuja, Simran and
34
- Dandapat, Sandipan and
35
- Srinivasan, Anirudh and
36
- Sitaram, Sunayana and
37
- Choudhury, Monojit",
38
- booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
39
- month = jul,
40
- year = "2020",
41
- address = "Online",
42
- publisher = "Association for Computational Linguistics",
43
- url = "https://www.aclweb.org/anthology/2020.acl-main.329",
44
- pages = "3575--3585"
45
- }
46
- """
47
-
48
- _DESCRIPTION = """\
49
- This is a collection of text conversations in Hinglish (code mixing between Hindi-English) and their corresponding English only versions. Can be used for Translating between the two.
50
- """
51
-
52
- _HOMEPAGE = "http://festvox.org/cedar/data/notyet/"
53
- _URL_HINGLISH = "http://festvox.org/cedar/data/notyet/CMUHinglishDoG.zip"
54
- # From: https://github.com/festvox/datasets-CMU_DoG/archive/master/Conversations.zip
55
- _URL_ENGLISH = "data-english.zip"
56
-
57
-
58
- class CMUHinglishDoG(datasets.GeneratorBasedBuilder):
59
- """Load the CMU Hinglish DoG Data for MT"""
60
-
61
- def _info(self):
62
- features = datasets.Features(
63
- {
64
- "date": datasets.Value("string"),
65
- "docIdx": datasets.Value("int64"),
66
- "translation": datasets.Translation(languages=["en", "hi_en"]),
67
- "uid": datasets.Value("string"),
68
- "utcTimestamp": datasets.Value("string"),
69
- "rating": datasets.Value("int64"),
70
- "status": datasets.Value("int64"),
71
- "uid1LogInTime": datasets.Value("string"),
72
- "uid1LogOutTime": datasets.Value("string"),
73
- "uid1response": {
74
- "response": datasets.Sequence(datasets.Value("int64")),
75
- "type": datasets.Value("string"),
76
- },
77
- "uid2response": {
78
- "response": datasets.Sequence(datasets.Value("int64")),
79
- "type": datasets.Value("string"),
80
- },
81
- "user2_id": datasets.Value("string"),
82
- "whoSawDoc": datasets.Sequence(datasets.Value("string")),
83
- "wikiDocumentIdx": datasets.Value("int64"),
84
- }
85
- )
86
- return datasets.DatasetInfo(
87
- description=_DESCRIPTION,
88
- features=features,
89
- supervised_keys=None,
90
- homepage=_HOMEPAGE,
91
- citation=_CITATION,
92
- )
93
-
94
- def _split_generators(self, dl_manager):
95
- """The linking part between Hinglish data and English data is inspired from the implementation in GLUECoS.
96
- Refer here for the original script https://github.com/microsoft/GLUECoS/blob/7fdc51653e37a32aee17505c47b7d1da364fa77e/Data/Preprocess_Scripts/preprocess_mt_en_hi.py"""
97
-
98
- eng_path = dl_manager.download_and_extract(_URL_ENGLISH)
99
- data_dir_en = os.path.join(eng_path, "Conversations")
100
-
101
- hi_en_path = dl_manager.download_and_extract(_URL_HINGLISH)
102
- data_dir_hi_en = os.path.join(hi_en_path, "CMUHinglishDoG", "Conversations_Hinglish")
103
-
104
- hi_en_dirs = {
105
- "train": os.path.join(data_dir_hi_en, "train"),
106
- "valid": os.path.join(data_dir_hi_en, "valid"),
107
- "test": os.path.join(data_dir_hi_en, "test"),
108
- }
109
-
110
- return [
111
- datasets.SplitGenerator(
112
- name=datasets.Split.TRAIN,
113
- gen_kwargs={
114
- "hi_en_dir": hi_en_dirs["train"],
115
- "data_dir_en": data_dir_en,
116
- },
117
- ),
118
- datasets.SplitGenerator(
119
- name=datasets.Split.TEST,
120
- gen_kwargs={
121
- "hi_en_dir": hi_en_dirs["test"],
122
- "data_dir_en": data_dir_en,
123
- },
124
- ),
125
- datasets.SplitGenerator(
126
- name=datasets.Split.VALIDATION,
127
- gen_kwargs={
128
- "hi_en_dir": hi_en_dirs["valid"],
129
- "data_dir_en": data_dir_en,
130
- },
131
- ),
132
- ]
133
-
134
- def _generate_examples(self, hi_en_dir, data_dir_en):
135
- """Yields examples."""
136
- english_files_train = os.listdir(os.path.join(data_dir_en, "train"))
137
- english_files_val = os.listdir(os.path.join(data_dir_en, "valid"))
138
- english_files_test = os.listdir(os.path.join(data_dir_en, "test"))
139
-
140
- hinglish_files = os.listdir(hi_en_dir)
141
- key = 0
142
- for f in hinglish_files:
143
- en_file_path = f.split(".json")[0] + ".json"
144
- found = True
145
- # Looks for the corresponding english file in all 3 splits
146
- if en_file_path in english_files_train:
147
- en = json.load(open(os.path.join(os.path.join(data_dir_en, "train"), en_file_path)))
148
- elif en_file_path in english_files_val:
149
- en = json.load(open(os.path.join(os.path.join(data_dir_en, "valid"), en_file_path)))
150
- elif en_file_path in english_files_test:
151
- en = json.load(open(os.path.join(os.path.join(data_dir_en, "test"), en_file_path)))
152
- else:
153
- found = False
154
- if found:
155
- hi_en = json.load(open(os.path.join(hi_en_dir, f)))
156
-
157
- assert len(en["history"]) == len(hi_en["history"])
158
-
159
- for x, y in zip(en["history"], hi_en["history"]):
160
- assert x["docIdx"] == y["docIdx"]
161
- assert x["uid"] == y["uid"]
162
- assert x["utcTimestamp"] == y["utcTimestamp"]
163
-
164
- x["text"] = re.sub("\t|\n", " ", x["text"])
165
- y["text"] = re.sub("\t|\n", " ", y["text"])
166
- line = {
167
- "date": hi_en["date"],
168
- "uid": x["uid"],
169
- "docIdx": x["docIdx"],
170
- "utcTimestamp": x["utcTimestamp"],
171
- "translation": {"hi_en": y["text"], "en": x["text"]},
172
- "rating": hi_en["rating"],
173
- "status": hi_en["status"],
174
- "uid1LogOutTime": hi_en.get("uid1LogOutTime"),
175
- "uid1LogInTime": hi_en["uid1LogInTime"],
176
- "uid1response": {
177
- "response": hi_en["uid1response"]["response"] if "uid1response" in hi_en else [],
178
- "type": hi_en["uid1response"]["type"] if "uid1response" in hi_en else None,
179
- },
180
- "uid2response": {
181
- "response": hi_en["uid2response"]["response"] if "uid2response" in hi_en else [],
182
- "type": hi_en["uid2response"]["type"] if "uid2response" in hi_en else None,
183
- },
184
- "user2_id": hi_en["user2_id"],
185
- "whoSawDoc": hi_en["whoSawDoc"],
186
- "wikiDocumentIdx": hi_en["wikiDocumentIdx"],
187
- }
188
-
189
- yield key, line
190
- key += 1
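The deleted script paired each Hinglish utterance with its English counterpart into a `translation` dict keyed by `en` and `hi_en`; that schema is unchanged in the Parquet data. A minimal access sketch, assuming `ds` was loaded as in the earlier example:

```python
# Assumes ds = load_dataset("cmu_hinglish_dog") as sketched above.
example = ds["train"][0]
pair = example["translation"]
print("en:   ", pair["en"])
print("hi_en:", pair["hi_en"])
```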
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75aac173d55983b24852fc30f097a60e11e9055cfef485b719e80f77d1c7767e
+size 99568
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce36a4aa054be82e6fc6857afa796fd1145b6544566e9151655b799ef17bf22a
+size 843924
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b13707b2fde4203d77a10f7c0e6bfe6ea27ec8014dd7d7cd868ca67f1051cc35
+size 96336
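Each file added above is a Git LFS pointer: the Parquet payload itself is stored by LFS and identified by the `oid sha256:` digest and `size`. A hedged verification sketch, assuming the real Parquet files have been pulled into a local `data/` directory:

```python
import hashlib
from pathlib import Path

# Digests copied from the LFS pointers in this commit.
expected = {
    "data/test-00000-of-00001.parquet": "75aac173d55983b24852fc30f097a60e11e9055cfef485b719e80f77d1c7767e",
    "data/train-00000-of-00001.parquet": "ce36a4aa054be82e6fc6857afa796fd1145b6544566e9151655b799ef17bf22a",
    "data/validation-00000-of-00001.parquet": "b13707b2fde4203d77a10f7c0e6bfe6ea27ec8014dd7d7cd868ca67f1051cc35",
}

for path, digest in expected.items():
    actual = hashlib.sha256(Path(path).read_bytes()).hexdigest()
    print(path, "OK" if actual == digest else "MISMATCH")
```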