Kevin99z committed on
Commit
ffe643a
1 Parent(s): 5590de4

initial commit

Browse files
.gitattributes CHANGED
@@ -53,3 +53,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ item_ids.json filter=lfs diff=lfs merge=lfs -text
57
+ relation2id.json filter=lfs diff=lfs merge=lfs -text
58
+ dbpedia_subkg.json filter=lfs diff=lfs merge=lfs -text
59
+ entity2id.json filter=lfs diff=lfs merge=lfs -text
60
+ test_data_dbpedia.jsonl filter=lfs diff=lfs merge=lfs -text
61
+ train_data_dbpedia.jsonl filter=lfs diff=lfs merge=lfs -text
62
+ valid_data_dbpedia.jsonl filter=lfs diff=lfs merge=lfs -text
dbpedia_subkg.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d1a18f4d9164c750be2cd8c9644d17f0a081b89bc2f0a5bb612dd75fbc6638e
3
+ size 1538732
entity2id.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c957f90ce0dd5bfe393f08e6bd31bb92ca66d1bdba6a96b4f0fc55533a3cd6e2
3
+ size 1758081
item_ids.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:257830bb5a891acd6ac2a9deadd200f39bcccbb3f220a6545ae5bb2e968ca8dc
3
+ size 41760
redial_unicrs.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import re
3
+ from typing import List, Dict
4
+ import html
5
+
6
+ import datasets
7
+ import pyarrow as pa
8
+
9
+ logger = datasets.logging.get_logger(__name__)
10
+
11
+
12
class RedialConfig(datasets.BuilderConfig):
    """Configuration for the ReDIAL dataset as preprocessed for UniCRS.

    Each configuration carries its own feature schema so that a single
    builder class can expose several views of the same raw files.
    """

    def __init__(self, features, **kwargs):
        """Create a ReDIAL configuration.

        Args:
            features: mapping from feature name to its ``datasets`` feature
                type; becomes the ``features`` argument of the builder's
                ``DatasetInfo``.
            **kwargs: keyword arguments forwarded to
                ``datasets.BuilderConfig`` (e.g. ``name``, ``description``).
        """
        # All configs share a single fixed dataset version.
        version = datasets.Version("0.0.1")
        super().__init__(version=version, **kwargs)
        self.features = features
25
+
26
+
27
+ _URL = "./"
28
+ _URLS = {
29
+ "train": _URL + "train_data_dbpedia.jsonl",
30
+ "valid": _URL + "valid_data_dbpedia.jsonl",
31
+ "test": _URL + "test_data_dbpedia.jsonl",
32
+ "entity2id": _URL + "entity2id.json"
33
+ }
34
+
35
+
36
class ReDIAL(datasets.GeneratorBasedBuilder):
    """Loader for the ReDIAL conversational-recommendation dataset (UniCRS preprocessing).

    Three configurations are exposed:

    * ``multiturn`` -- one sample per dialogue turn: the preceding turns are
      the ``context``, the current turn is the ``resp``, with entity/movie
      label ids attached.
    * ``multiturn_masked`` -- like ``multiturn``, but movie names in the
      response are replaced by the ``<movie>`` placeholder, and turns spoken
      by the conversation initiator are folded into the context instead of
      being yielded as samples.
    * ``compact`` -- one sample per whole conversation, keeping the raw
      message/sender/mention structure.
    """

    DEFAULT_CONFIG_NAME = "multiturn"
    BUILDER_CONFIGS = [
        # One sample per turn; response text keeps the real movie names.
        RedialConfig(
            name="multiturn",
            description="The processed ReDIAL dataset in UniCRS. Each conversation yields multiple samples",
            features={
                "context": datasets.Sequence(datasets.Value("string")),
                "resp": datasets.Value("string"),
                "rec": datasets.Sequence(datasets.Value("int32")),
                "entity": datasets.Sequence(datasets.Value("int32")),
            },
        ),
        # Same schema as "multiturn"; movie names in the response are masked
        # with "<movie>" and only non-initiator turns are yielded.
        RedialConfig(
            name="multiturn_masked",
            description="",
            features={
                "context": datasets.Sequence(datasets.Value("string")),
                "resp": datasets.Value("string"),
                "rec": datasets.Sequence(datasets.Value("int32")),
                "entity": datasets.Sequence(datasets.Value("int32")),
            },
        ),
        # Whole-conversation view.
        # NOTE(review): "entities" and "movies" are declared as sequences of
        # strings, but _generate_examples yields integer ids from entity2id
        # for them -- confirm the intended feature dtype.
        RedialConfig(
            name="compact",
            description="Each conversation is one sample",
            features={
                "movieIds": datasets.Sequence(datasets.Value("string")),
                "movieNames": datasets.Sequence(datasets.Value("string")),
                "initiatorWorkerId": datasets.Value("int32"),
                "messages": datasets.Sequence(datasets.Value("string")),
                "senders": datasets.Sequence(datasets.Value("int32")),
                "entities": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                "movies": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
            },
        ),
    ]
    # Matches ReDIAL movie-mention placeholders like "@12345" in message text.
    movie_pattern = re.compile(r'@\d+')

    def __init__(self, **kwargs):
        # No extra state; present only to forward builder kwargs to the base class.
        super().__init__(**kwargs)

    def _info(self):
        """Return the DatasetInfo for the active configuration's feature schema."""
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(self.config.features),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Resolve the local/remote data files and declare the three splits.

        The entity2id mapping file is shared by every split and passed to the
        example generator alongside each split's data file.
        """
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        entity2id_file = downloaded_files["entity2id"]
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={"filepath": downloaded_files["train"], "entity2id": entity2id_file}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION,
                                    gen_kwargs={"filepath": downloaded_files["valid"], "entity2id": entity2id_file}),
            datasets.SplitGenerator(name=datasets.Split.TEST,
                                    gen_kwargs={"filepath": downloaded_files["test"], "entity2id": entity2id_file}),
        ]

    def _process_utt(self, utt: str, movieid2name, replace_movieId, remove_movie=False) -> str:
        """Normalize one utterance.

        Replaces each "@<digits>" movie placeholder with the movie's name
        (or the literal "<movie>" token when ``remove_movie`` is set),
        collapses runs of whitespace, and unescapes HTML entities.

        Args:
            utt: raw utterance text.
            movieid2name: mapping from movie id (string, without the "@")
                to movie name for this dialogue.
            replace_movieId: when falsy, placeholders are left untouched.
            remove_movie: replace known movie mentions with "<movie>"
                instead of the movie name.
        """
        def convert(match):
            # match.group(0) is "@12345"; drop the leading "@".
            movieid = match.group(0)[1:]
            if movieid in movieid2name:
                if remove_movie:
                    return '<movie>'
                movie_name = movieid2name[movieid]
                # Normalize internal whitespace in the movie name.
                movie_name = ' '.join(movie_name.split())
                return movie_name
            else:
                # Unknown id: keep the placeholder verbatim.
                return match.group(0)

        if replace_movieId:
            utt = re.sub(self.movie_pattern, convert, utt)
        utt = ' '.join(utt.split())
        utt = html.unescape(utt)

        return utt

    def _generate_examples(self, filepath, entity2id):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)

        # `entity2id` arrives as a file path and is rebound to the parsed
        # {entity name -> integer id} mapping.
        with open(entity2id, 'r', encoding='utf-8') as f:
            entity2id = json.load(f)
        if "multiturn" in self.config.name:
            # "multiturn_masked" is the only config name containing "mask".
            mask_flag = "mask" in self.config.name
            Idx = 0
            with open(filepath, encoding="utf-8") as f:
                for line in f:
                    dialog = json.loads(line)
                    if len(dialog['messages']) == 0:
                        continue

                    movieid2name = dialog['movieMentions']
                    user_id, resp_id = dialog['initiatorWorkerId'], dialog['respondentWorkerId']
                    context, resp = [], ''
                    entity_list = []
                    messages = dialog['messages']
                    turn_i = 0
                    # Outer loop walks the dialogue turn by turn, where a
                    # "turn" is a maximal run of consecutive messages from
                    # the same sender (grouped by the inner loop below).
                    while turn_i < len(messages):
                        worker_id = messages[turn_i]['senderWorkerId']
                        utt_turn = []
                        entity_turn = []
                        movie_turn = []
                        mask_utt_turn = []

                        turn_j = turn_i
                        while turn_j < len(messages) and messages[turn_j]['senderWorkerId'] == worker_id:
                            utt = self._process_utt(messages[turn_j]['text'], movieid2name, replace_movieId=True)
                            utt_turn.append(utt)

                            if mask_flag:
                                # Masked variant additionally builds the same
                                # turn text with movie names replaced by "<movie>".
                                mask_utt = self._process_utt(messages[turn_j]['text'], movieid2name,
                                                             replace_movieId=True,
                                                             remove_movie=True)
                                mask_utt_turn.append(mask_utt)

                            # Keep only mentions that exist in the KG vocabulary.
                            entity_ids = [entity2id[entity] for entity in messages[turn_j]['entity'] if
                                          entity in entity2id]
                            entity_turn.extend(entity_ids)

                            movie_ids = [entity2id[movie] for movie in messages[turn_j]['movie'] if movie in entity2id]
                            movie_turn.extend(movie_ids)

                            turn_j += 1

                        utt = ' '.join(utt_turn)
                        # NOTE: when mask_flag is False, mask_utt_turn is always
                        # empty, so mask_utt is '' -- the unmasked `resp` is
                        # yielded instead in that case.
                        mask_utt = ' '.join(mask_utt_turn)

                        if mask_flag and worker_id == user_id:
                            # Masked config: initiator (seeker) turns are not
                            # yielded; they only extend the running context.
                            context.append(utt)
                            entity_list.append(entity_turn + movie_turn)
                        else:
                            resp = utt

                            # Flatten the per-turn entity lists accumulated so far.
                            context_entity_list = [entity for entity_l in entity_list for entity in entity_l]
                            context_entity_list_extend = []

                            # Deduplicate context entities (order is not preserved).
                            context_entity_list_extend += context_entity_list
                            context_entity_list_extend = list(set(context_entity_list_extend))

                            # Guarantee a non-empty context sequence for the
                            # first turn of a conversation.
                            if len(context) == 0:
                                context.append('')
                            yield Idx, {
                                'context': context,
                                'resp': mask_utt if mask_flag else resp,
                                # Masked config: recommendation labels are the
                                # current turn's movies only; otherwise movies
                                # and entities combined, deduplicated.
                                'rec': movie_turn if mask_flag else list(set(movie_turn + entity_turn)),
                                'entity': context_entity_list_extend,
                            }
                            Idx += 1

                            # The just-yielded response becomes context for
                            # the following turns.
                            context.append(resp)
                            entity_list.append(movie_turn + entity_turn)

                        turn_i = turn_j
        elif self.config.name == "compact":
            Idx = 0
            with open(filepath, encoding="utf-8") as f:
                for line in f:
                    dialog = json.loads(line)
                    if len(dialog['messages']) == 0:
                        continue
                    messages = dialog['messages']

                    movieIds = [movieId for movieId in dialog["movieMentions"]]
                    # NOTE(review): `id` below shadows the builtin; harmless
                    # here but worth renaming.  Also, "entities"/"movies"
                    # yield integer KG ids while the declared feature type is
                    # Sequence(Value("string")) -- confirm intended dtype.
                    yield Idx, {
                        "movieIds": movieIds,
                        "movieNames": [dialog["movieMentions"][id] for id in movieIds],
                        "initiatorWorkerId": dialog["initiatorWorkerId"],
                        "messages": [turn['text'] for turn in messages],
                        "senders": [turn["senderWorkerId"] for turn in messages],
                        "entities": [[entity2id[entity] for entity in turn['entity'] if entity in entity2id] for turn in
                                     messages],
                        "movies": [[entity2id[entity] for entity in turn['movie'] if entity in entity2id] for turn in
                                   messages]
                    }

                    Idx += 1
relation2id.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43d6e261ec5a70bfac9db28aa8bd53f1140597816eb60d2c9be555a3381e2578
3
+ size 1136
test_data_dbpedia.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f47ae77276a5cbc7021b2221cd9aac8d8236dd094bd33bb89a29e089ce7e1c94
3
+ size 6924848
train_data_dbpedia.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f73ec13388bda6e990720d63d9ad4ac0e95579b8e443cafc26a72a8fa7b8d29f
3
+ size 47778009
valid_data_dbpedia.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec4bb48e2557fe160f83f10d7d10e706fd701c08dbe25494fb4840d930ce6472
3
+ size 5331775