guillermoruiz committed
Commit b0d06b1 • 1 Parent(s): 0860043

Upload Bilma

Files changed (2)
  1. modeling_bilma.py +501 -9
  2. tf_model.h5 +2 -2
modeling_bilma.py CHANGED
@@ -1,7 +1,33 @@
 from transformers import TFPreTrainedModel
-import bilma_model as bm
+from tensorflow.keras.models import Model, load_model, Sequential
+from tensorflow.keras.layers import Layer, Dense, concatenate, Input, add, Dropout, LayerNormalization, MultiHeadAttention, Embedding
+import tensorflow as tf
+import numpy as np
+
+import re
+import unicodedata
+
 from configuration_bilma import BilmaConfig
 
+# copied from preprocessing.py
+BLANK = ' '
+
+RE_OPS = re.I | re.M | re.S
+RE_USR = re.compile(r"""@\S+""", RE_OPS)
+RE_TAG = re.compile(r"""#\S+""", RE_OPS)
+RE_URL = re.compile(r"""(http|ftp|https)://\S+""", RE_OPS)
+RE_NUM = re.compile(r"""[-+]?\d+\.?\d*""", RE_OPS)
+
+SYMBOLS_ = "()[]¿?¡!{}~<>|"
+SYMBOLS = set(";:,.@\\-\"/" + SYMBOLS_)
+
+
+
+# ------------------
+# Class declaration
+# ------------------
+
+
 class Bilma(TFPreTrainedModel):
     config_class = BilmaConfig
     main_input_name = "input_ids"
@@ -13,14 +39,480 @@ class Bilma(TFPreTrainedModel):
         # model_file = str((my_resources / "bilma_dataset_small_epoch-1_part-60.h5").joinpath())
         # self.model = bm.load(model_file)
         #else:
-        self.model = bm.bilma(num_enc=config.num_encoders,
-                              embed_dim=config.embedding_dim,
-                              max_length=config.max_length,
-                              num_heads=config.num_attention_heads,
-                              ff_dim=config.embedding_dim,
-                              vocab_size=config.vocab_size,
-                              rate=config.drop_rate)
+        self.model = bilma(num_enc=config.num_encoders,
+                           embed_dim=config.embedding_dim,
+                           max_length=config.max_length,
+                           num_heads=config.num_attention_heads,
+                           ff_dim=config.embedding_dim,
+                           vocab_size=config.vocab_size,
+                           rate=config.drop_rate)
 
 
     def call(self, tensor):
-        return self.model(tensor)
+        return self.model(tensor)
+
+
+#
+# Copied from transformer_text.py
+# -------------------------------
+
+class EncoderBlock(Layer):
+    def __init__(self, patch_dim, num_heads, ff_dim, rate=0.1, **kwargs):
+        super(EncoderBlock, self).__init__(**kwargs)
+        self.p_d = patch_dim
+        self.n_h = num_heads
+        self.f_d = ff_dim
+        self.rate = rate
+
+        self.att = MultiHeadAttention(num_heads=num_heads, key_dim=patch_dim)
+        self.ffn = Sequential(
+            #[Conv1D(ff_dim, kernel_size=1, activation=tf.nn.gelu),
+            # Conv1D(patch_dim, kernel_size=1),]
+            [Dense(ff_dim, activation=tf.nn.gelu),
+             Dense(patch_dim),]
+        )
+        #self.layernorm0 = LayerNormalization(epsilon=1e-6)
+        self.layernorm1 = LayerNormalization(epsilon=1e-6)
+        self.layernorm2 = LayerNormalization(epsilon=1e-6)
+        self.dropout1 = Dropout(rate)
+        self.dropout2 = Dropout(rate)
+
+    def get_config(self):
+        config = super(EncoderBlock, self).get_config()
+        config.update({"patch_dim":self.p_d, "num_heads":self.n_h, "ff_dim":self.f_d, "rate":self.rate})
+        return config
+
+    def call(self, inputs, training=False):
+        #inputs = self.layernorm0(inputs)
+        attn_output = self.att(inputs, inputs)
+        attn_output = self.dropout1(attn_output, training=training)
+        out1 = self.layernorm1(add([inputs, attn_output]))
+        ffn_output = self.ffn(out1)
+        ffn_output = self.dropout2(ffn_output, training=training)
+        return self.layernorm2(add([out1, ffn_output]))
+
+
+class DecoderBlock(Layer):
+    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1, **kwargs):
+        super(DecoderBlock, self).__init__(**kwargs)
+        self.e_d = embed_dim
+        self.n_h = num_heads
+        self.f_d = ff_dim
+        self.rate = rate
+
+        self.att1 = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
+        self.att2 = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
+        self.ffn = Sequential(
+            #[Conv1D(ff_dim, kernel_size=1, activation=tf.nn.gelu),
+            # Conv1D(embed_dim, kernel_size=1),]
+            [Dense(ff_dim, activation=tf.nn.gelu),
+             Dense(embed_dim),]
+        )
+        self.layernorm1 = LayerNormalization(epsilon=1e-6)
+        self.layernorm2 = LayerNormalization(epsilon=1e-6)
+        self.dropout1 = Dropout(rate)
+        self.dropout2 = Dropout(rate)
+        self.dropout3 = Dropout(rate)
+
+    def get_config(self):
+        config = super(DecoderBlock, self).get_config()
+        config.update({"embed_dim":self.e_d, "num_heads":self.n_h, "ff_dim":self.f_d, "rate":self.rate})
+        return config
+
+    def call(self, inputs, encoder_output, look_ahead_mask, padding_mask, training=None):
+        y, attn_output1 = self.att1(inputs, inputs, attention_mask=look_ahead_mask, return_attention_scores=True)
+        y = self.dropout1(y, training=training)
+        y = add([inputs, y])
+        out1 = self.layernorm1(y)
+
+        y, attn_encoder = self.att2(out1, encoder_output, attention_mask=padding_mask, return_attention_scores=True)
+        y = self.dropout2(y, training=training)
+        y = add([out1, y])
+        out2 = self.layernorm1(y)
+
+        ffn_output = self.ffn(out2)
+        ffn_output = self.dropout3(ffn_output, training=training)
+        final_output = self.layernorm2(out2 + ffn_output)
+
+        return final_output, attn_output1, attn_encoder
+
+
+class Encoder(Layer):
+    def __init__(self, n, embed_dim, max_length, num_heads, ff_dim, rate=0.1, **kwargs):
+        super(Encoder, self).__init__(**kwargs)
+        self.n = n
+        self.embed_dim = embed_dim
+        self.max_length = max_length
+        self.n_h = num_heads
+        self.f_d = ff_dim
+        self.rate = rate
+        self._layers = [EncoderBlock(embed_dim, num_heads, ff_dim, rate=0.1) for _ in range(n)]
+        self.pe = positional_encoding(self.max_length, self.embed_dim)
+
+    def get_config(self):
+        config = super(Encoder, self).get_config()
+        config.update({"n": self.n, "embed_dim":self.embed_dim, "max_length": self.max_length, "num_heads":self.n_h, "ff_dim":self.f_d, "rate":self.rate})
+        return config
+
+    def call(self, x, training=False):
+        x *= tf.math.sqrt(tf.cast(self.embed_dim, tf.float32))
+        x = x + self.pe[:, :tf.shape(x)[1], :]
+        for layer in self._layers:
+            x = layer(x, training)
+        return x
+
+
+class Decoder(Layer):
+    def __init__(self, n, embed_dim, max_length, num_heads, ff_dim, rate=0.1, **kwargs):
+        super(Decoder, self).__init__(**kwargs)
+        self.n = n
+        self.embed_dim = embed_dim
+        self.max_length = max_length
+        self.n_h = num_heads
+        self.f_d = ff_dim
+        self.rate = rate
+        self._layers = [DecoderBlock(embed_dim, num_heads, ff_dim, rate=0.1) for _ in range(n)]
+        self.pe = positional_encoding(self.max_length, self.embed_dim)
+
+    def get_config(self):
+        config = super(Decoder, self).get_config()
+        config.update({"n": self.n, "embed_dim":self.embed_dim, "max_length": self.max_length, "num_heads":self.n_h, "ff_dim":self.f_d, "rate":self.rate})
+        return config
+
+    def call(self, x, encoder_output, look_ahead_mask, padding_mask, training):
+        x *= tf.math.sqrt(tf.cast(self.embed_dim, tf.float32))
+        x = x + self.pe[:, :tf.shape(x)[1], :]
+
+        for layer in self._layers:
+            x, self_att, enc_att = layer(x, encoder_output, look_ahead_mask, padding_mask, training)
+
+        return x
+
+
+
+
+# =========================================
+# M A S K S
+# =========================================
+def create_padding_mask(seq):
+    """
+    For self-attention
+    seq shape(bs, max_length, emb_dim)
+    output shape (bs, max_length, max_length)
+    """
+    mask = tf.cast(tf.not_equal(seq, 0), tf.bool)
+    mask = tf.reduce_any(mask, 2)
+    mask = tf.repeat(mask, seq.shape[1], 0)
+    mask = tf.reshape(mask, (-1,seq.shape[1], seq.shape[1]))
+    return tf.cast(mask, tf.float32)
+
+
+def create_cross_padding_mask(seq, target_seq):
+    """
+    For cross-attention
+    seq shape(bs, k, image_features)
+    target_seq(bs, max_length, emb_dim)
+    output shape (bs, max_length, k)
+    """
+    mask = tf.cast(tf.not_equal(target_seq, 0), tf.bool)
+    mask = tf.reduce_any(mask, 2)
+    mask = tf.repeat(mask, seq.shape[1], 0)
+    mask = tf.reshape(mask, (-1, tf.shape(seq)[1], tf.shape(target_seq)[1]))
+    mask = tf.transpose(mask, [0, 2, 1])
+    return mask
+
+
+def create_look_ahead_mask(seq):
+    """
+    seq shape(bs, max_length, emb_dim)
+    output 2D matrix of shape (bs, max_length, max_length) with ones on the diagonal and below.
+    """
+    size = seq.shape[1]
+    mask = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
+    mask = tf.expand_dims(mask, 0)
+    mask = tf.repeat(mask, tf.shape(seq)[0], 0)
+    return mask
+
+
+def create_masks(seq, target_seq):
+    decoder_mask = create_padding_mask(target_seq)
+    decoder_mask *= create_look_ahead_mask(target_seq)
+    cross_att_mask = create_cross_padding_mask(seq, target_seq)
+    return decoder_mask, cross_att_mask
+
+
+def create_masks_looking_ahead(seq, target_seq):
+    decoder_mask = create_padding_mask(target_seq)
+    cross_att_mask = create_cross_padding_mask(seq, target_seq)
+    return decoder_mask, cross_att_mask
+
+# =========================================
+# P O S I T I O N A L   E N C O D I N G
+# =========================================
+def get_angles(pos, i, d_model):
+    angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
+    return pos * angle_rates
+
+@tf.autograph.experimental.do_not_convert
+def positional_encoding(position, d_model):
+    angle_rads = get_angles(np.arange(position)[:, np.newaxis],
+                            np.arange(d_model)[np.newaxis, :],
+                            d_model)
+
+    # apply sin to even indices in the array; 2i
+    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
+
+    # apply cos to odd indices in the array; 2i+1
+    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
+
+    pos_encoding = angle_rads[np.newaxis, ...]
+
+    return tf.cast(pos_encoding, dtype=tf.float32)
+
+class PatchEncoder(Layer):
+    def __init__(self, num_patches, projection_dim, **kwargs):
+        super(PatchEncoder, self).__init__(**kwargs)
+        self.num_patches = num_patches
+        self.projection_dim = projection_dim
+        self.projection = Dense(units=projection_dim)
+        self.position_embedding = Embedding(
+            input_dim=num_patches, output_dim=projection_dim
+        )
+
+    def get_config(self):
+        config = super(PatchEncoder, self).get_config()
+        config.update({"num_patches": self.num_patches, "projection_dim":self.projection_dim})
+        return config
+
+    def call(self, patch):
+        positions = tf.range(start=0, limit=self.num_patches, delta=1)
+        encoded = self.projection(patch) + self.position_embedding(positions)
+        return encoded
+
+
+
+# Copied from preprocessing.py
+# ----------------------------
+def norm_chars(text):
+    L = []
+
+    for u in unicodedata.normalize('NFD', text):
+        o = ord(u)
+        if 0x300 <= o and o <= 0x036F:
+            continue
+
+        if u in ('\n', '\r', BLANK, '\t'):
+            if len(L) == 0:
+                continue
+
+            u = BLANK
+
+        if u in SYMBOLS:
+            if len(L) > 0 and L[-1] != BLANK:
+                L.append(BLANK)
+
+            L.append(u)
+            L.append(BLANK)
+            continue
+
+        L.append(u)
+
+    return "".join(L)
+
+
+def preprocess(text):
+    text = RE_URL.sub("_url ", text)
+    text = RE_USR.sub("_usr ", text)
+    #text = RE_TAG.sub("_htag ", text)
+    #text = RE_NUM.sub("0 ", text)
+    text = re.sub(r"&amp;", "&", text)
+    text = re.sub(r"&gt;", ">", text)
+    text = re.sub(r"&lt;", "<", text)
+    #text = norm_chars(text.lower())
+    text = re.sub(r"j(a|e|i)[jaei]+", r"j\1j\1", text)
+    text = re.sub(r"h(a|e|i)[haei]+", r"j\1j\1", text)
+    return re.sub(r"\s+", BLANK, text)
+
+
+
+# Copied from wordpiece_tokenizer_ex.py
+# -------------------------------------
+
+class Tokenizer():
+    def __init__(self, vocab_file, unk_token="[UNK]", end_token="[END]", mask_token="[MASK]"):
+        self.word2idx = {}
+        self.idx2word = []
+        c = 0
+        with open(vocab_file, "r", encoding="utf8") as f:
+            while True:
+                line = f.readline()
+                if not line:
+                    break
+                self.word2idx[line[0:-1]] = c
+                self.idx2word.append(line[0:-1])
+                c += 1
+        self.n_jobs = 2
+        self.UNK = unk_token
+        self.END = end_token
+        self.MASK = mask_token
+
+    def split(self, s):
+        split = []
+        i = 0
+        while i < len(s):
+            for j in range(i, len(s)):
+                if (i==j and s[j:j+6] == self.MASK):
+                    split.append(self.MASK)
+                    i = j + 6
+                    break
+                if (s[j].isalnum()):
+                    continue
+                if (j==i):
+                    if (s[j] != " "):
+                        split.append(s[i:j+1])
+                    i = j + 1
+                    break
+                split.append(s[i:j])
+                i = j
+                break
+            else:
+                split.append(s[i:j+1])
+                i=j+1
+        return split
+
+    def tokenize(self, S):
+        #return Parallel(n_jobs=self.n_jobs)(delayed(self._tokenize)(s) for s in S)
+        return [self._tokenize(s) for s in S]
+
+    def detokenize(self, S, human_readable=True):
+        #return Parallel(n_jobs=self.n_jobs)(delayed(self._detokenize)(s) for s in S)
+        return [self._detokenize(s, human_readable=human_readable) for s in S]
+
+    def _tokenize(self, s):
+        tokens = []
+        s = s.rstrip('\n')
+        for w in self.split(s):
+            if w in self.word2idx:
+                tokens.append(self.word2idx[w])
+            else:
+                if (len(w)==1):
+                    tokens.append(self.word2idx["[UNK]"])
+                    continue
+
+                subtoken = []
+                l = 0
+                while len(w)>l:
+
+                    for i in range(len(w),l-1,-1):
+                        if (w[0: i] in self.word2idx):
+                            subtoken.append(self.word2idx[w[0: i]])
+                            break
+                    if (i == l):
+                        subtoken = [self.word2idx["[UNK]"]]
+                        break
+                    w = "##" + w[i: ]
+                    l = 2
+                tokens += subtoken
+        return tokens
+
+
+    def _detokenize(self, tokens, human_readable=True):
+        sentence = []
+        start = 0 if human_readable == False else 1
+
+        for t in tokens[start:]:
+            c = self.idx2word[t]
+            if (human_readable and c == self.END):
+                break
+            sentence.append(c)
+        return sentence
+
+
+
+# copied from bilma_model.py
+# --------------------------
+
+def loss_function(ignore_id=0):
+    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
+    def loss(real, pred):
+        mask = tf.math.logical_not(tf.math.equal(real, ignore_id))
+        loss_ = loss_object(real, pred)
+        mask = tf.cast(mask, dtype=loss_.dtype)
+        loss_ *= mask
+        sum_ = tf.reduce_sum(mask,axis=1)
+
+        loss_ = tf.math.divide_no_nan(tf.reduce_sum(loss_, axis=1), sum_)
+        return loss_
+    return loss
+
+def accuracy_function(ignore_id=0):
+    def acc_mlm(real, pred):
+        accuracies = tf.equal(tf.cast(real, tf.int64), tf.argmax(pred, axis=2))
+
+        mask = tf.math.logical_not(tf.math.equal(real, ignore_id))
+        accuracies = tf.math.logical_and(mask, accuracies)
+
+        accuracies = tf.cast(accuracies, dtype=tf.float32)
+        mask = tf.cast(mask, dtype=tf.float32)
+        return tf.math.divide_no_nan(tf.reduce_sum(accuracies), tf.reduce_sum(mask))
+    return acc_mlm
+
+def bilma(num_enc=6, embed_dim=300, max_length=50, num_heads=6, ff_dim=512, vocab_size=9739, rate=0.1):
+    capt_inputs_ids = Input(shape=(max_length, ), name='capt_input')
+    capt_embedding = Embedding(vocab_size, embed_dim, mask_zero=False, name="embedding")
+    capt_inputs = capt_embedding(capt_inputs_ids)
+
+    enc = Encoder(num_enc, embed_dim, max_length, num_heads, ff_dim, rate=rate)
+    enc_output = enc(capt_inputs)
+    fin_output = Dense(vocab_size, use_bias=True)(enc_output)
+
+    caption_model = Model(inputs=capt_inputs_ids, outputs=[fin_output])
+    return caption_model
+
+def load(model_file):
+    custom_objects={"EncoderBlock": EncoderBlock,
+                    "Encoder": Encoder,
+                    "loss": loss_function(),
+                    "acc_mlm":accuracy_function(),
+                    }
+    return load_model(model_file, custom_objects=custom_objects)
+
+class tokenizer():
+    def __init__(self, vocab_file, max_length):
+        self.tokenizer = Tokenizer(vocab_file)
+        self.emo_labels = "β€πŸ‘ŒπŸ‘πŸ’”πŸ˜„πŸ˜ŠπŸ˜ŒπŸ˜πŸ˜’πŸ˜˜πŸ˜‘πŸ˜’πŸ˜­πŸ€”πŸ₯Ί"
+        self.max_length = max_length
+        self.START = 2
+        self.END = 3
+        self.PAD = 0
+        self.MASK = 4
+
+    def tokenize(self, text):
+        text = [preprocess(t) for t in text]
+        tokens = tf.ragged.constant(self.tokenizer.tokenize(text), tf.int32)
+        count, _ = tokens.bounding_shape()
+        starts = tf.fill([count,1], self.START)
+        ends = tf.fill([count,1], self.END)
+        tokens = tf.concat([starts, tokens[:, 0: self.max_length - 2], ends], axis=1)
+        tokens = tokens.to_tensor(self.PAD, shape=(len(text), self.max_length))
+        return tokens.numpy()
+
+    def detokenize(self, tokens, human_readable=True):
+        words = self.tokenizer.detokenize(tokens, human_readable=human_readable)
+        if (human_readable==True):
+            return [" ".join(w) for w in words]
+        text = tf.strings.reduce_join(words, separator=' ', axis=-1)
+        return text
+
+    def top_k(self, predictions, positions, k=10):
+        top = []
+        for p, m in zip(predictions, positions):
+            top_k = self.detokenize([tf.argsort(p[m])[-k:][::-1]], False).numpy()[0].decode('utf8').split()
+            top.append(top_k)
+        return top
+
+    def decode_emo(self, predictions):
+        emo = tf.argmax(predictions, axis=-1)
+        return [self.emo_labels[i] for i in emo]
+
+
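
Note: with this commit, modeling_bilma.py no longer imports bilma_model; the encoder blocks, masks, preprocessing, wordpiece tokenizer, and the bilma() builder are defined in the file itself. The following is only a rough usage sketch, not part of the commit: it assumes transformers and configuration_bilma.py are importable and that a local vocabulary file (here called vocab_file.txt, a hypothetical name) matches vocab_size.

# Sketch: build the stand-alone Keras encoder and tokenize one tweet-like string.
from modeling_bilma import bilma, tokenizer, loss_function, accuracy_function

# Same defaults as the bilma() signature in this file.
model = bilma(num_enc=6, embed_dim=300, max_length=50,
              num_heads=6, ff_dim=512, vocab_size=9739, rate=0.1)
model.compile(optimizer="adam", loss=loss_function(), metrics=[accuracy_function()])

tok = tokenizer("vocab_file.txt", max_length=50)               # assumed vocab path
batch = tok.tokenize(["@user esto es una prueba http://x.y"])  # -> int array of shape (1, 50)
logits = model(batch)                                          # -> (1, 50, vocab_size)
print(logits.shape)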
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66f9f5b3091dc16d4b903116f60425fae780bd2ed34fdcf74e4585d7d5024b17
-size 156561684
+oid sha256:23a49d18b00cad9b1f4e5b5ea309dd22097c44165de8fcbece833491d4968211
+size 156561756
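
The tf_model.h5 change only updates the Git LFS pointer (new sha256 oid and size). A quick sketch for checking that a locally downloaded copy matches the pointer above; the local path is an assumption.

# Sketch: compare a local tf_model.h5 against the oid/size recorded in this commit.
import hashlib, os

EXPECTED_OID = "23a49d18b00cad9b1f4e5b5ea309dd22097c44165de8fcbece833491d4968211"
EXPECTED_SIZE = 156561756
path = "tf_model.h5"  # assumed download location

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("tf_model.h5 matches the LFS pointer")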