Commit eb58854 by albertvillanova (parent: bf289a5)

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (186d185991cc4daef68f5041da884e2721015dcd)
- Delete loading script (3756a1eaf11cde4759c843a5f4fd09b8fd83f48e)
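
The conversion replaces the script-based loader (deleted below) with Parquet shards plus a `configs` mapping in the README, so the dataset can be loaded without executing repository code. A minimal sketch of the result; the repo id `re_dial` is an assumption taken from the deleted script's filename, so substitute the actual Hub dataset id:

```python
# Minimal sketch of loading the dataset after this commit. The repo id
# "re_dial" is an assumption; replace it with the actual Hub dataset id.
from datasets import load_dataset

ds = load_dataset("re_dial")
print(ds)  # expect a train split (10006 examples) and a test split (1342 examples)
```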

README.md CHANGED
@@ -68,13 +68,20 @@ dataset_info:
     dtype: int32
   splits:
   - name: train
-    num_bytes: 13496125
+    num_bytes: 13490771
     num_examples: 10006
   - name: test
-    num_bytes: 1731449
+    num_bytes: 1731413
     num_examples: 1342
-  download_size: 5765261
-  dataset_size: 15227574
+  download_size: 7449804
+  dataset_size: 15222184
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for ReDial (Recommendation Dialogues)
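
Besides refreshing the split sizes, the change adds a `configs` block that maps each split to Parquet files by glob pattern; this is what lets the Hub resolve the data without the loading script. The explicit equivalent of that mapping, as a sketch using the generic Parquet builder and assuming the two shards have been downloaded into a local `data/` directory:

```python
# Sketch: the explicit equivalent of the README's configs mapping, using the
# generic "parquet" builder on locally downloaded shards.
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={"train": "data/train-*", "test": "data/test-*"},
)
```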
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2dd52106a66d484df11c10d06b8ca698917e22e79e0748d55124c2d8b0ea8dd0
+size 834986
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7ff962a8ec261d27be4ea16a909656d5b47f0fd923cdbce93d14208bb8e6b38
+size 6614818
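
Both files added under `data/` are Git LFS pointer files; the `size` fields put the actual Parquet payloads at 6614818 bytes (train) and 834986 bytes (test), which sum exactly to the README's new `download_size` of 7449804. Once fetched, a shard can be inspected directly, for example with pyarrow (a sketch assuming the file is present locally):

```python
# Sketch: inspecting a downloaded shard with pyarrow.
import pyarrow.parquet as pq

table = pq.read_table("data/train-00000-of-00001.parquet")
print(table.num_rows)  # expected 10006, per the README split metadata
print(table.schema)    # movieMentions, respondentQuestions, messages, ...
```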
re_dial.py DELETED
@@ -1,160 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Annotated dataset of dialogues where users recommend movies to each other."""
-
-
-import json
-import os
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{li2018conversational,
-  title={Towards Deep Conversational Recommendations},
-  author={Li, Raymond and Kahou, Samira Ebrahimi and Schulz, Hannes and Michalski, Vincent and Charlin, Laurent and Pal, Chris},
-  booktitle={Advances in Neural Information Processing Systems 31 (NIPS 2018)},
-  year={2018}
-}
-"""
-
-_DESCRIPTION = """\
-ReDial (Recommendation Dialogues) is an annotated dataset of dialogues, where users
-recommend movies to each other. The dataset was collected by a team of researchers working at
-Polytechnique Montréal, MILA – Quebec AI Institute, Microsoft Research Montréal, HEC Montreal, and Element AI.
-
-The dataset allows research at the intersection of goal-directed dialogue systems
-(such as restaurant recommendation) and free-form (also called “chit-chat”) dialogue systems.
-"""
-
-_HOMEPAGE = "https://redialdata.github.io/website/"
-
-_LICENSE = "CC BY 4.0 License."
-
-_DATA_URL = "https://github.com/ReDialData/website/raw/data/redial_dataset.zip"
-
-
-class ReDial(datasets.GeneratorBasedBuilder):
-    """Annotated dataset of dialogues where users recommend movies to each other."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-        question_features = {
-            "movieId": datasets.Value("string"),
-            "suggested": datasets.Value("int32"),
-            "seen": datasets.Value("int32"),
-            "liked": datasets.Value("int32"),
-        }
-        features = datasets.Features(
-            {
-                "movieMentions": [
-                    {
-                        "movieId": datasets.Value("string"),
-                        "movieName": datasets.Value("string"),
-                    },
-                ],
-                "respondentQuestions": [question_features],
-                "messages": [
-                    {
-                        "timeOffset": datasets.Value("int32"),
-                        "text": datasets.Value("string"),
-                        "senderWorkerId": datasets.Value("int32"),
-                        "messageId": datasets.Value("int32"),
-                    },
-                ],
-                "conversationId": datasets.Value("int32"),
-                "respondentWorkerId": datasets.Value("int32"),
-                "initiatorWorkerId": datasets.Value("int32"),
-                "initiatorQuestions": [question_features],
-            }
-        )
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_DATA_URL)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train_data.jsonl"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "test_data.jsonl"), "split": "test"},
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-
-        with open(filepath, encoding="utf-8") as f:
-            examples = f.readlines()
-            for id_, row in enumerate(examples):
-                data = json.loads(row.strip())
-                d = {}
-                movieMentions_list = []
-                for i in data["movieMentions"]:
-                    d["movieId"] = i
-                    d["movieName"] = data["movieMentions"][i]
-                    movieMentions_list.append(d)
-                d = {}
-
-                respondentQuestions_list = []
-                for i in data["respondentQuestions"]:
-                    d["movieId"] = i
-                    alpha = data["respondentQuestions"][i]
-                    z = {**d, **alpha}  # merging 2 dictionaries
-                    respondentQuestions_list.append(z)
-                d = {}
-
-                initiatorQuestions_list = []
-                for i in data["initiatorQuestions"]:
-                    d["movieId"] = i
-                    alpha = data["initiatorQuestions"][i]
-                    z = {**d, **alpha}  # merging 2 dictionaries
-                    initiatorQuestions_list.append(z)
-                d = {}
-
-                yield id_, {
-                    "movieMentions": movieMentions_list,
-                    "respondentQuestions": respondentQuestions_list,
-                    "messages": data["messages"],
-                    "conversationId": data["conversationId"],
-                    "respondentWorkerId": data["respondentWorkerId"],
-                    "initiatorWorkerId": data["initiatorWorkerId"],
-                    "initiatorQuestions": initiatorQuestions_list,
-                }
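
One detail worth noting in the deleted generator: the `movieMentions` loop appended the same dict object `d` on every iteration and only mutated it, so every entry of `movieMentions_list` ended up aliasing the last mention (the two questions loops avoided this by building a fresh merged dict `z` each time). A corrected sketch of what that loop apparently intended:

```python
# Corrected sketch of the movieMentions loop: build a fresh dict per mention
# instead of re-appending and mutating a single shared dict.
movieMentions_list = [
    {"movieId": movie_id, "movieName": movie_name}
    for movie_id, movie_name in data["movieMentions"].items()
]
```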