rrrick committed on
Commit c8d06f7
1 Parent(s): f0960a8

Create dataloader.py

Files changed (1)
  1. dataloader.py +288 -0
dataloader.py ADDED
@@ -0,0 +1,288 @@
import gzip
import os
import sys
import io
import re
import random
import csv
import numpy as np
import torch
csv.field_size_limit(sys.maxsize)

def clean_str(string, TREC=False):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Every dataset is lower cased except for TREC.
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", r" \( ", string)
    string = re.sub(r"\)", r" \) ", string)
    string = re.sub(r"\?", r" \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip() if TREC else string.strip().lower()

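# Illustrative example (not taken from any dataset file): clean_str separates
# punctuation and clitics, then lower-cases unless TREC=True, e.g.
#   clean_str("Don't stop, it's GOOD!")  ->  "do n't stop , it 's good !"
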
def read_corpus(path, csvf=False, clean=True, MR=True, TREC=False,
                encoding='utf8', shuffle=False, lower=True):
    data = []
    labels = []
    if not csvf:
        with open(path, encoding=encoding) as fin:
            for line in fin:
                if MR:
                    label, sep, text = line.partition(' ')
                    label = int(label)
                else:
                    label, sep, text = line.partition(',')
                    label = int(label) - 1
                text = clean_str(text.strip(), TREC) if clean else text.strip()
                if lower:
                    text = text.lower()
                labels.append(label)
                data.append(text.split())
    else:
        with open(path, "r", encoding=encoding) as f:
            reader = csv.reader(f, delimiter=",")
            for line in reader:
                text = line[0]
                label = int(line[1])
                text = clean_str(text.strip(), TREC) if clean else text.strip()
                if lower:
                    text = text.lower()
                labels.append(label)
                data.append(text.split())

    if shuffle:
        perm = list(range(len(data)))
        random.shuffle(perm)
        data = [ data[i] for i in perm ]
        labels = [ labels[i] for i in perm ]

    return data, labels

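# Usage sketch (the path below is only an example): in the plain-text branch
# each line is expected to be "<label> <text>" when MR=True, or "<label>,<text>"
# with 1-indexed labels otherwise; the CSV branch expects "text,label" rows.
#   data, labels = read_corpus("data/rt-polarity.all", encoding='latin-1')
#   # data[i] is a list of tokens, labels[i] is an int
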
def read_MR(path, seed=1234):
    file_path = os.path.join(path, "rt-polarity.all")
    data, labels = read_corpus(file_path, encoding='latin-1')
    random.seed(seed)
    perm = list(range(len(data)))
    random.shuffle(perm)
    data = [ data[i] for i in perm ]
    labels = [ labels[i] for i in perm ]
    return data, labels

def read_SUBJ(path, seed=1234):
    file_path = os.path.join(path, "subj.all")
    data, labels = read_corpus(file_path, encoding='latin-1')
    random.seed(seed)
    perm = list(range(len(data)))
    random.shuffle(perm)
    data = [ data[i] for i in perm ]
    labels = [ labels[i] for i in perm ]
    return data, labels

def read_CR(path, seed=1234):
    file_path = os.path.join(path, "custrev.all")
    data, labels = read_corpus(file_path)
    random.seed(seed)
    perm = list(range(len(data)))
    random.shuffle(perm)
    data = [ data[i] for i in perm ]
    labels = [ labels[i] for i in perm ]
    return data, labels

def read_MPQA(path, seed=1234):
    file_path = os.path.join(path, "mpqa.all")
    data, labels = read_corpus(file_path)
    random.seed(seed)
    perm = list(range(len(data)))
    random.shuffle(perm)
    data = [ data[i] for i in perm ]
    labels = [ labels[i] for i in perm ]
    return data, labels

def read_TREC(path, seed=1234):
    train_path = os.path.join(path, "TREC.train.all")
    test_path = os.path.join(path, "TREC.test.all")
    # TREC keeps its original casing (see clean_str), so lower-casing is disabled
    train_x, train_y = read_corpus(train_path, TREC=True, encoding='latin-1', lower=False)
    test_x, test_y = read_corpus(test_path, TREC=True, encoding='latin-1', lower=False)
    random.seed(seed)
    perm = list(range(len(train_x)))
    random.shuffle(perm)
    train_x = [ train_x[i] for i in perm ]
    train_y = [ train_y[i] for i in perm ]
    return train_x, train_y, test_x, test_y

def read_SST(path, seed=1234):
    train_path = os.path.join(path, "stsa.binary.phrases.train")
    valid_path = os.path.join(path, "stsa.binary.dev")
    test_path = os.path.join(path, "stsa.binary.test")
    # the SST files are already tokenized, so clean_str is skipped (see its docstring)
    train_x, train_y = read_corpus(train_path, clean=False)
    valid_x, valid_y = read_corpus(valid_path, clean=False)
    test_x, test_y = read_corpus(test_path, clean=False)
    random.seed(seed)
    perm = list(range(len(train_x)))
    random.shuffle(perm)
    train_x = [ train_x[i] for i in perm ]
    train_y = [ train_y[i] for i in perm ]
    return train_x, train_y, valid_x, valid_y, test_x, test_y

def cv_split(data, labels, nfold, test_id):
    assert (nfold > 1) and (test_id >= 0) and (test_id < nfold)
    lst_x = [ x for i, x in enumerate(data) if i%nfold != test_id ]
    lst_y = [ y for i, y in enumerate(labels) if i%nfold != test_id ]
    test_x = [ x for i, x in enumerate(data) if i%nfold == test_id ]
    test_y = [ y for i, y in enumerate(labels) if i%nfold == test_id ]
    perm = list(range(len(lst_x)))
    random.shuffle(perm)
    M = int(len(lst_x)*0.9)
    train_x = [ lst_x[i] for i in perm[:M] ]
    train_y = [ lst_y[i] for i in perm[:M] ]
    valid_x = [ lst_x[i] for i in perm[M:] ]
    valid_y = [ lst_y[i] for i in perm[M:] ]
    return train_x, train_y, valid_x, valid_y, test_x, test_y

def cv_split2(data, labels, nfold, valid_id):
    assert (nfold > 1) and (valid_id >= 0) and (valid_id < nfold)
    train_x = [ x for i, x in enumerate(data) if i%nfold != valid_id ]
    train_y = [ y for i, y in enumerate(labels) if i%nfold != valid_id ]
    valid_x = [ x for i, x in enumerate(data) if i%nfold == valid_id ]
    valid_y = [ y for i, y in enumerate(labels) if i%nfold == valid_id ]
    return train_x, train_y, valid_x, valid_y

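# Cross-validation sketch (nfold/test_id values are illustrative): cv_split
# treats every example with i % nfold == test_id as the test fold and randomly
# carves ~10% of the remaining data into a validation set; cv_split2 only
# separates a validation fold.
#   splits = cv_split(data, labels, nfold=10, test_id=0)
#   train_x, train_y, valid_x, valid_y, test_x, test_y = splits
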
def pad(sequences, pad_token='<pad>', pad_left=True):
    ''' input: sequences is a list of token sequences [[str]];
        pad each sequence to the length of the longest one (at least 5)
    '''
    max_len = max(5, max(len(seq) for seq in sequences))
    if pad_left:
        return [ [pad_token]*(max_len-len(seq)) + seq for seq in sequences ]
    return [ seq + [pad_token]*(max_len-len(seq)) for seq in sequences ]


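# Example (illustrative): sequences are left-padded to the longest length,
# with a minimum length of 5.
#   pad([['good'], ['not', 'bad']])
#   -> [['<pad>', '<pad>', '<pad>', '<pad>', 'good'],
#       ['<pad>', '<pad>', '<pad>', 'not', 'bad']]
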
def create_one_batch(x, y, map2id, oov='<oov>'):
    oov_id = map2id[oov]
    x = pad(x)
    length = len(x[0])
    batch_size = len(x)
    x = [ map2id.get(w, oov_id) for seq in x for w in seq ]
    x = torch.LongTensor(x)
    assert x.size(0) == length*batch_size
    if torch.cuda.is_available():
        return x.view(batch_size, length).t().contiguous().cuda(), torch.LongTensor(y).cuda()
    else:
        return x.view(batch_size, length).t().contiguous(), torch.LongTensor(y)

def create_one_batch_x(x, map2id, oov='<oov>'):
    oov_id = map2id[oov]
    x = pad(x)
    length = len(x[0])
    batch_size = len(x)
    x = [ map2id.get(w, oov_id) for seq in x for w in seq ]
    x = torch.LongTensor(x)
    assert x.size(0) == length*batch_size
    if torch.cuda.is_available():
        return x.view(batch_size, length).t().contiguous().cuda()
    else:
        return x.view(batch_size, length).t().contiguous()


# shuffle training examples and create mini-batches
def create_batches(x, y, batch_size, map2id, perm=None, sort=False):

    lst = perm or range(len(x))

    # sort sequences based on their length; necessary for SST
    if sort:
        lst = sorted(lst, key=lambda i: len(x[i]))

    x = [ x[i] for i in lst ]
    y = [ y[i] for i in lst ]

    sum_len = 0.
    for ii in x:
        sum_len += len(ii)
    batches_x = [ ]
    batches_y = [ ]
    size = batch_size
    nbatch = (len(x)-1) // size + 1
    for i in range(nbatch):
        bx, by = create_one_batch(x[i*size:(i+1)*size], y[i*size:(i+1)*size], map2id)
        batches_x.append(bx)
        batches_y.append(by)

    if sort:
        perm = list(range(nbatch))
        random.shuffle(perm)
        batches_x = [ batches_x[i] for i in perm ]
        batches_y = [ batches_y[i] for i in perm ]

    sys.stdout.write("{} batches, avg sent len: {:.1f}\n".format(
        nbatch, sum_len/len(x)
    ))

    return batches_x, batches_y


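# Minimal batching sketch (the toy vocabulary below is an assumption; it must
# contain '<pad>' and '<oov>'): each returned tensor has shape
# (seq_len, batch_size), ready to feed an embedding layer.
#   map2id = {'<pad>': 0, '<oov>': 1, 'good': 2, 'bad': 3}
#   bx, by = create_batches([['good'], ['bad', 'movie']], [1, 0],
#                           batch_size=2, map2id=map2id)
#   # bx[0] is a 5 x 2 LongTensor; 'movie' falls back to the <oov> id
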
# shuffle training examples and create mini-batches
def create_batches_x(x, batch_size, map2id, perm=None, sort=False):

    lst = perm or range(len(x))

    # sort sequences based on their length; necessary for SST
    if sort:
        lst = sorted(lst, key=lambda i: len(x[i]))

    x = [ x[i] for i in lst ]

    sum_len = 0.0
    batches_x = [ ]
    size = batch_size
    nbatch = (len(x)-1) // size + 1
    for i in range(nbatch):
        bx = create_one_batch_x(x[i*size:(i+1)*size], map2id)
        sum_len += len(bx)
        batches_x.append(bx)

    if sort:
        perm = list(range(nbatch))
        random.shuffle(perm)
        batches_x = [ batches_x[i] for i in perm ]

    # sys.stdout.write("{} batches, avg len: {:.1f}\n".format(
    #     nbatch, sum_len/nbatch
    # ))

    return batches_x


def load_embedding_npz(path):
    data = np.load(path)
    return [ w.decode('utf8') for w in data['words'] ], data['vals']

def load_embedding_txt(path):
    file_open = gzip.open if path.endswith(".gz") else open
    words = [ ]
    vals = [ ]
    # open in text mode so gzip-compressed files are decoded correctly
    with file_open(path, 'rt', encoding='utf-8') as fin:
        fin.readline()  # skip the first line (header in word2vec/fastText-style files)
        for line in fin:
            line = line.rstrip()
            if line:
                parts = line.split(' ')
                words.append(parts[0])
                vals += [ float(x) for x in parts[1:] ]
    return words, np.asarray(vals).reshape(len(words), -1)

def load_embedding(path):
    if path.endswith(".npz"):
        return load_embedding_npz(path)
    else:
        return load_embedding_txt(path)
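
# End-to-end sketch; the file name and the <pad>/<oov> handling are assumptions
# for illustration (load_embedding_txt skips a one-line header, as in
# word2vec/fastText text formats).
#   words, vals = load_embedding("embeddings.vec.gz")
#   map2id = {w: i for i, w in enumerate(words)}
#   for tok in ('<pad>', '<oov>'):
#       map2id.setdefault(tok, len(map2id))
#   data, labels = read_MR("data/mr")
#   batches_x, batches_y = create_batches(data, labels, 32, map2id)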