serdaryildiz committed
Commit af06dba · verified · Parent: f4dc407

Upload 24 files
.gitignore ADDED
@@ -0,0 +1,2 @@
1
+ /checkpoints/
2
+ .idea
Model/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .trcaptionnet import TRCaptionNet
2
+ from .clip.clip import _transform as clip_transform
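The package __init__ above re-exports TRCaptionNet alongside CLIP's preprocessing transform. As a minimal sketch (not part of the upload), the re-exported clip_transform can be used to prepare an image tensor for the captioning model; the 224-pixel resolution and the demo.jpg path are illustrative assumptions, since the actual resolution depends on the CLIP backbone TRCaptionNet is configured with.

from PIL import Image
from Model import clip_transform  # CLIP's Resize / CenterCrop / ToTensor / Normalize pipeline

preprocess = clip_transform(224)             # assumed resolution; 336 for ViT-L/14@336px
image = preprocess(Image.open("demo.jpg"))   # demo.jpg is a placeholder image path
batch = image.unsqueeze(0)                   # shape (1, 3, 224, 224), ready for the captioning model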
Model/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (257 Bytes)
 
Model/__pycache__/trcaptionnet.cpython-310.pyc ADDED
Binary file (3.6 kB)
 
Model/bert/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .med import BertLMHeadModel, BertConfig
Model/bert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (217 Bytes)
 
Model/bert/__pycache__/med.cpython-310.pyc ADDED
Binary file (27.2 kB)
 
Model/bert/med.py ADDED
@@ -0,0 +1,940 @@
1
+ '''
2
+ * Based on huggingface code base
3
+ * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
4
+ '''
5
+
6
+ import math
7
+ from typing import Tuple
8
+
9
+ import torch
10
+ from torch import Tensor, device
11
+ import torch.utils.checkpoint
12
+ from torch import nn
13
+ from torch.nn import CrossEntropyLoss
14
+
15
+ from transformers.activations import ACT2FN
16
+ from transformers.modeling_outputs import (
17
+ BaseModelOutputWithPastAndCrossAttentions,
18
+ BaseModelOutputWithPoolingAndCrossAttentions,
19
+ CausalLMOutputWithCrossAttentions,
20
+ )
21
+ from transformers.modeling_utils import (
22
+ PreTrainedModel,
23
+ apply_chunking_to_forward,
24
+ find_pruneable_heads_and_indices,
25
+ prune_linear_layer,
26
+ )
27
+ from transformers.utils import logging
28
+ from transformers.models.bert.configuration_bert import BertConfig
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ class BertEmbeddings(nn.Module):
35
+ """Construct the embeddings from word and position embeddings."""
36
+
37
+ def __init__(self, config):
38
+ super().__init__()
39
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
40
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
41
+
42
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
43
+ # any TensorFlow checkpoint file
44
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
45
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
46
+
47
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
48
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
49
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
50
+
51
+ self.config = config
52
+
53
+ def forward(
54
+ self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
55
+ ):
56
+ if input_ids is not None:
57
+ input_shape = input_ids.size()
58
+ else:
59
+ input_shape = inputs_embeds.size()[:-1]
60
+
61
+ seq_length = input_shape[1]
62
+
63
+ if position_ids is None:
64
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
65
+
66
+ if inputs_embeds is None:
67
+ inputs_embeds = self.word_embeddings(input_ids)
68
+
69
+ embeddings = inputs_embeds
70
+
71
+ if self.position_embedding_type == "absolute":
72
+ position_embeddings = self.position_embeddings(position_ids)
73
+ embeddings += position_embeddings
74
+ embeddings = self.LayerNorm(embeddings)
75
+ embeddings = self.dropout(embeddings)
76
+ return embeddings
77
+
78
+
79
+ class BertSelfAttention(nn.Module):
80
+ def __init__(self, config, is_cross_attention):
81
+ super().__init__()
82
+ self.config = config
83
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
84
+ raise ValueError(
85
+ "The hidden size (%d) is not a multiple of the number of attention "
86
+ "heads (%d)" % (config.hidden_size, config.num_attention_heads)
87
+ )
88
+
89
+ self.num_attention_heads = config.num_attention_heads
90
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
91
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
92
+
93
+ # self.query = nn.Linear(config.hidden_size, self.all_head_size)
94
+ # if is_cross_attention:
95
+ # self.key = nn.Linear(config.encoder_width, self.all_head_size)
96
+ # self.value = nn.Linear(config.encoder_width, self.all_head_size)
97
+ # else:
98
+ # self.key = nn.Linear(config.hidden_size, self.all_head_size)
99
+ # self.value = nn.Linear(config.hidden_size, self.all_head_size)
100
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
101
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
102
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
103
+
104
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
105
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
106
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
107
+ self.max_position_embeddings = config.max_position_embeddings
108
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
109
+ self.save_attention = False
110
+
111
+ def save_attn_gradients(self, attn_gradients):
112
+ self.attn_gradients = attn_gradients
113
+
114
+ def get_attn_gradients(self):
115
+ return self.attn_gradients
116
+
117
+ def save_attention_map(self, attention_map):
118
+ self.attention_map = attention_map
119
+
120
+ def get_attention_map(self):
121
+ return self.attention_map
122
+
123
+ def transpose_for_scores(self, x):
124
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
125
+ x = x.view(*new_x_shape)
126
+ return x.permute(0, 2, 1, 3)
127
+
128
+ def forward(
129
+ self,
130
+ hidden_states,
131
+ attention_mask=None,
132
+ head_mask=None,
133
+ encoder_hidden_states=None,
134
+ encoder_attention_mask=None,
135
+ past_key_value=None,
136
+ output_attentions=False,
137
+ ):
138
+ mixed_query_layer = self.query(hidden_states)
139
+
140
+ # If this is instantiated as a cross-attention module, the keys
141
+ # and values come from an encoder; the attention mask needs to be
142
+ # such that the encoder's padding tokens are not attended to.
143
+ is_cross_attention = encoder_hidden_states is not None
144
+
145
+ if is_cross_attention:
146
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
147
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
148
+ attention_mask = encoder_attention_mask
149
+ elif past_key_value is not None:
150
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
151
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
152
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
153
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
154
+ else:
155
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
156
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
157
+
158
+ query_layer = self.transpose_for_scores(mixed_query_layer)
159
+
160
+ past_key_value = (key_layer, value_layer)
161
+
162
+ # Take the dot product between "query" and "key" to get the raw attention scores.
163
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
164
+
165
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
166
+ seq_length = hidden_states.size()[1]
167
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
168
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
169
+ distance = position_ids_l - position_ids_r
170
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
171
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
172
+
173
+ if self.position_embedding_type == "relative_key":
174
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
175
+ attention_scores = attention_scores + relative_position_scores
176
+ elif self.position_embedding_type == "relative_key_query":
177
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
178
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
179
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
180
+
181
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
182
+ if attention_mask is not None:
183
+ # Apply the attention mask (precomputed for all layers in BertModel forward() function)
184
+ attention_scores = attention_scores + attention_mask
185
+
186
+ # Normalize the attention scores to probabilities.
187
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
188
+
189
+ if is_cross_attention and self.save_attention:
190
+ self.save_attention_map(attention_probs)
191
+ attention_probs.register_hook(self.save_attn_gradients)
192
+
193
+ # This is actually dropping out entire tokens to attend to, which might
194
+ # seem a bit unusual, but is taken from the original Transformer paper.
195
+ attention_probs_dropped = self.dropout(attention_probs)
196
+
197
+ # Mask heads if we want to
198
+ if head_mask is not None:
199
+ attention_probs_dropped = attention_probs_dropped * head_mask
200
+
201
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
202
+
203
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
204
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
205
+ context_layer = context_layer.view(*new_context_layer_shape)
206
+
207
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
208
+
209
+ outputs = outputs + (past_key_value,)
210
+ return outputs
211
+
212
+
213
+ class BertSelfOutput(nn.Module):
214
+ def __init__(self, config):
215
+ super().__init__()
216
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
217
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
218
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
219
+
220
+ def forward(self, hidden_states, input_tensor):
221
+ hidden_states = self.dense(hidden_states)
222
+ hidden_states = self.dropout(hidden_states)
223
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
224
+ return hidden_states
225
+
226
+
227
+ class BertAttention(nn.Module):
228
+ def __init__(self, config, is_cross_attention=False):
229
+ super().__init__()
230
+ self.self = BertSelfAttention(config, is_cross_attention)
231
+ self.output = BertSelfOutput(config)
232
+ self.pruned_heads = set()
233
+
234
+ def prune_heads(self, heads):
235
+ if len(heads) == 0:
236
+ return
237
+ heads, index = find_pruneable_heads_and_indices(
238
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
239
+ )
240
+
241
+ # Prune linear layers
242
+ self.self.query = prune_linear_layer(self.self.query, index)
243
+ self.self.key = prune_linear_layer(self.self.key, index)
244
+ self.self.value = prune_linear_layer(self.self.value, index)
245
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
246
+
247
+ # Update hyper params and store pruned heads
248
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
249
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
250
+ self.pruned_heads = self.pruned_heads.union(heads)
251
+
252
+ def forward(
253
+ self,
254
+ hidden_states,
255
+ attention_mask=None,
256
+ head_mask=None,
257
+ encoder_hidden_states=None,
258
+ encoder_attention_mask=None,
259
+ past_key_value=None,
260
+ output_attentions=False,
261
+ ):
262
+ self_outputs = self.self(
263
+ hidden_states,
264
+ attention_mask,
265
+ head_mask,
266
+ encoder_hidden_states,
267
+ encoder_attention_mask,
268
+ past_key_value,
269
+ output_attentions,
270
+ )
271
+ attention_output = self.output(self_outputs[0], hidden_states)
272
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
273
+ return outputs
274
+
275
+
276
+ class BertIntermediate(nn.Module):
277
+ def __init__(self, config):
278
+ super().__init__()
279
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
280
+ if isinstance(config.hidden_act, str):
281
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
282
+ else:
283
+ self.intermediate_act_fn = config.hidden_act
284
+
285
+ def forward(self, hidden_states):
286
+ hidden_states = self.dense(hidden_states)
287
+ hidden_states = self.intermediate_act_fn(hidden_states)
288
+ return hidden_states
289
+
290
+
291
+ class BertOutput(nn.Module):
292
+ def __init__(self, config):
293
+ super().__init__()
294
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
295
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
296
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
297
+
298
+ def forward(self, hidden_states, input_tensor):
299
+ hidden_states = self.dense(hidden_states)
300
+ hidden_states = self.dropout(hidden_states)
301
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
302
+ return hidden_states
303
+
304
+
305
+ class BertLayer(nn.Module):
306
+ def __init__(self, config, layer_num):
307
+ super().__init__()
308
+ self.config = config
309
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
310
+ self.seq_len_dim = 1
311
+ self.attention = BertAttention(config)
312
+ self.layer_num = layer_num
313
+ if self.config.add_cross_attention:
314
+ self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention)
315
+ self.intermediate = BertIntermediate(config)
316
+ self.output = BertOutput(config)
317
+
318
+ def forward(
319
+ self,
320
+ hidden_states,
321
+ attention_mask=None,
322
+ head_mask=None,
323
+ encoder_hidden_states=None,
324
+ encoder_attention_mask=None,
325
+ past_key_value=None,
326
+ output_attentions=False,
327
+ mode=None,
328
+ ):
329
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
330
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
331
+ self_attention_outputs = self.attention(
332
+ hidden_states,
333
+ attention_mask,
334
+ head_mask,
335
+ output_attentions=output_attentions,
336
+ past_key_value=self_attn_past_key_value,
337
+ )
338
+ attention_output = self_attention_outputs[0]
339
+
340
+ outputs = self_attention_outputs[1:-1]
341
+ present_key_value = self_attention_outputs[-1]
342
+
343
+ if mode=='multimodal':
344
+ assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
345
+
346
+ cross_attention_outputs = self.crossattention(
347
+ attention_output,
348
+ attention_mask,
349
+ head_mask,
350
+ encoder_hidden_states,
351
+ encoder_attention_mask,
352
+ output_attentions=output_attentions,
353
+ )
354
+ attention_output = cross_attention_outputs[0]
355
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
356
+ layer_output = apply_chunking_to_forward(
357
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
358
+ )
359
+ outputs = (layer_output,) + outputs
360
+
361
+ outputs = outputs + (present_key_value,)
362
+
363
+ return outputs
364
+
365
+ def feed_forward_chunk(self, attention_output):
366
+ intermediate_output = self.intermediate(attention_output)
367
+ layer_output = self.output(intermediate_output, attention_output)
368
+ return layer_output
369
+
370
+
371
+ class BertEncoder(nn.Module):
372
+ def __init__(self, config):
373
+ super().__init__()
374
+ self.config = config
375
+ self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)])
376
+ self.gradient_checkpointing = False
377
+
378
+ def forward(
379
+ self,
380
+ hidden_states,
381
+ attention_mask=None,
382
+ head_mask=None,
383
+ encoder_hidden_states=None,
384
+ encoder_attention_mask=None,
385
+ past_key_values=None,
386
+ use_cache=None,
387
+ output_attentions=False,
388
+ output_hidden_states=False,
389
+ return_dict=True,
390
+ mode='multimodal',
391
+ ):
392
+ all_hidden_states = () if output_hidden_states else None
393
+ all_self_attentions = () if output_attentions else None
394
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
395
+
396
+ next_decoder_cache = () if use_cache else None
397
+
398
+ for i in range(self.config.num_hidden_layers):
399
+ layer_module = self.layer[i]
400
+ if output_hidden_states:
401
+ all_hidden_states = all_hidden_states + (hidden_states,)
402
+
403
+ layer_head_mask = head_mask[i] if head_mask is not None else None
404
+ past_key_value = past_key_values[i] if past_key_values is not None else None
405
+
406
+ if self.gradient_checkpointing and self.training:
407
+
408
+ if use_cache:
409
+ logger.warn(
410
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
411
+ )
412
+ use_cache = False
413
+
414
+ def create_custom_forward(module):
415
+ def custom_forward(*inputs):
416
+ return module(*inputs, past_key_value, output_attentions)
417
+
418
+ return custom_forward
419
+
420
+ layer_outputs = torch.utils.checkpoint.checkpoint(
421
+ create_custom_forward(layer_module),
422
+ hidden_states,
423
+ attention_mask,
424
+ layer_head_mask,
425
+ encoder_hidden_states,
426
+ encoder_attention_mask,
427
+ mode=mode,
428
+ )
429
+ else:
430
+ layer_outputs = layer_module(
431
+ hidden_states,
432
+ attention_mask,
433
+ layer_head_mask,
434
+ encoder_hidden_states,
435
+ encoder_attention_mask,
436
+ past_key_value,
437
+ output_attentions,
438
+ mode=mode,
439
+ )
440
+
441
+ hidden_states = layer_outputs[0]
442
+ if use_cache:
443
+ next_decoder_cache += (layer_outputs[-1],)
444
+ if output_attentions:
445
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
446
+
447
+ if output_hidden_states:
448
+ all_hidden_states = all_hidden_states + (hidden_states,)
449
+
450
+ if not return_dict:
451
+ return tuple(
452
+ v
453
+ for v in [
454
+ hidden_states,
455
+ next_decoder_cache,
456
+ all_hidden_states,
457
+ all_self_attentions,
458
+ all_cross_attentions,
459
+ ]
460
+ if v is not None
461
+ )
462
+ return BaseModelOutputWithPastAndCrossAttentions(
463
+ last_hidden_state=hidden_states,
464
+ past_key_values=next_decoder_cache,
465
+ hidden_states=all_hidden_states,
466
+ attentions=all_self_attentions,
467
+ cross_attentions=all_cross_attentions,
468
+ )
469
+
470
+
471
+ class BertPooler(nn.Module):
472
+ def __init__(self, config):
473
+ super().__init__()
474
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
475
+ self.activation = nn.Tanh()
476
+
477
+ def forward(self, hidden_states):
478
+ # We "pool" the model by simply taking the hidden state corresponding
479
+ # to the first token.
480
+ first_token_tensor = hidden_states[:, 0]
481
+ pooled_output = self.dense(first_token_tensor)
482
+ pooled_output = self.activation(pooled_output)
483
+ return pooled_output
484
+
485
+
486
+ class BertPredictionHeadTransform(nn.Module):
487
+ def __init__(self, config):
488
+ super().__init__()
489
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
490
+ if isinstance(config.hidden_act, str):
491
+ self.transform_act_fn = ACT2FN[config.hidden_act]
492
+ else:
493
+ self.transform_act_fn = config.hidden_act
494
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
495
+
496
+ def forward(self, hidden_states):
497
+ hidden_states = self.dense(hidden_states)
498
+ hidden_states = self.transform_act_fn(hidden_states)
499
+ hidden_states = self.LayerNorm(hidden_states)
500
+ return hidden_states
501
+
502
+
503
+ class BertLMPredictionHead(nn.Module):
504
+ def __init__(self, config):
505
+ super().__init__()
506
+ self.transform = BertPredictionHeadTransform(config)
507
+
508
+ # The output weights are the same as the input embeddings, but there is
509
+ # an output-only bias for each token.
510
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
511
+
512
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
513
+
514
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
515
+ self.decoder.bias = self.bias
516
+
517
+ def forward(self, hidden_states):
518
+ hidden_states = self.transform(hidden_states)
519
+ hidden_states = self.decoder(hidden_states)
520
+ return hidden_states
521
+
522
+
523
+ class BertOnlyMLMHead(nn.Module):
524
+ def __init__(self, config):
525
+ super().__init__()
526
+ self.predictions = BertLMPredictionHead(config)
527
+
528
+ def forward(self, sequence_output):
529
+ prediction_scores = self.predictions(sequence_output)
530
+ return prediction_scores
531
+
532
+
533
+ class BertPreTrainedModel(PreTrainedModel):
534
+ """
535
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
536
+ models.
537
+ """
538
+
539
+ config_class = BertConfig
540
+ base_model_prefix = "bert"
541
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
542
+
543
+ def _init_weights(self, module):
544
+ """ Initialize the weights """
545
+ if isinstance(module, (nn.Linear, nn.Embedding)):
546
+ # Slightly different from the TF version which uses truncated_normal for initialization
547
+ # cf https://github.com/pytorch/pytorch/pull/5617
548
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
549
+ elif isinstance(module, nn.LayerNorm):
550
+ module.bias.data.zero_()
551
+ module.weight.data.fill_(1.0)
552
+ if isinstance(module, nn.Linear) and module.bias is not None:
553
+ module.bias.data.zero_()
554
+
555
+
556
+ class BertModel(BertPreTrainedModel):
557
+ """
558
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
559
+ cross-attention is added between the self-attention layers, following the architecture described in `Attention is
560
+ all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
561
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
562
+ To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
563
+ input to the forward pass.
564
+ """
565
+
566
+ def __init__(self, config, add_pooling_layer=True):
567
+ super().__init__(config)
568
+ self.config = config
569
+
570
+ self.embeddings = BertEmbeddings(config)
571
+
572
+ self.encoder = BertEncoder(config)
573
+
574
+ self.pooler = BertPooler(config) if add_pooling_layer else None
575
+
576
+ self.init_weights()
577
+
578
+
579
+ def get_input_embeddings(self):
580
+ return self.embeddings.word_embeddings
581
+
582
+ def set_input_embeddings(self, value):
583
+ self.embeddings.word_embeddings = value
584
+
585
+ def _prune_heads(self, heads_to_prune):
586
+ """
587
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
588
+ class PreTrainedModel
589
+ """
590
+ for layer, heads in heads_to_prune.items():
591
+ self.encoder.layer[layer].attention.prune_heads(heads)
592
+
593
+
594
+ def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
595
+ """
596
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
597
+
598
+ Arguments:
599
+ attention_mask (:obj:`torch.Tensor`):
600
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
601
+ input_shape (:obj:`Tuple[int]`):
602
+ The shape of the input to the model.
603
+ device: (:obj:`torch.device`):
604
+ The device of the input to the model.
605
+
606
+ Returns:
607
+ :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
608
+ """
609
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
610
+ # ourselves in which case we just need to make it broadcastable to all heads.
611
+ if attention_mask.dim() == 3:
612
+ extended_attention_mask = attention_mask[:, None, :, :]
613
+ elif attention_mask.dim() == 2:
614
+ # Provided a padding mask of dimensions [batch_size, seq_length]
615
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
616
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
617
+ if is_decoder:
618
+ batch_size, seq_length = input_shape
619
+
620
+ seq_ids = torch.arange(seq_length, device=device)
621
+ causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
622
+ # in case past_key_values are used we need to add a prefix ones mask to the causal mask
623
+ # causal and attention masks must have same type with pytorch version < 1.3
624
+ causal_mask = causal_mask.to(attention_mask.dtype)
625
+
626
+ if causal_mask.shape[1] < attention_mask.shape[1]:
627
+ prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
628
+ causal_mask = torch.cat(
629
+ [
630
+ torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
631
+ causal_mask,
632
+ ],
633
+ axis=-1,
634
+ )
635
+
636
+ extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
637
+ else:
638
+ extended_attention_mask = attention_mask[:, None, None, :]
639
+ else:
640
+ raise ValueError(
641
+ "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
642
+ input_shape, attention_mask.shape
643
+ )
644
+ )
645
+
646
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
647
+ # masked positions, this operation will create a tensor which is 0.0 for
648
+ # positions we want to attend and -10000.0 for masked positions.
649
+ # Since we are adding it to the raw scores before the softmax, this is
650
+ # effectively the same as removing these entirely.
651
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
652
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
653
+ return extended_attention_mask
654
+
655
+ def forward(
656
+ self,
657
+ input_ids=None,
658
+ attention_mask=None,
659
+ position_ids=None,
660
+ head_mask=None,
661
+ inputs_embeds=None,
662
+ encoder_embeds=None,
663
+ encoder_hidden_states=None,
664
+ encoder_attention_mask=None,
665
+ past_key_values=None,
666
+ use_cache=None,
667
+ output_attentions=None,
668
+ output_hidden_states=None,
669
+ return_dict=None,
670
+ is_decoder=False,
671
+ mode='multimodal',
672
+ ):
673
+ r"""
674
+ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
675
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
676
+ the model is configured as a decoder.
677
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
678
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
679
+ the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
680
+ - 1 for tokens that are **not masked**,
681
+ - 0 for tokens that are **masked**.
682
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
683
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
684
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
685
+ (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
686
+ instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
687
+ use_cache (:obj:`bool`, `optional`):
688
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
689
+ decoding (see :obj:`past_key_values`).
690
+ """
691
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
692
+ output_hidden_states = (
693
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
694
+ )
695
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
696
+
697
+ if is_decoder:
698
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
699
+ else:
700
+ use_cache = False
701
+
702
+ if input_ids is not None and inputs_embeds is not None:
703
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
704
+ elif input_ids is not None:
705
+ input_shape = input_ids.size()
706
+ batch_size, seq_length = input_shape
707
+ device = input_ids.device
708
+ elif inputs_embeds is not None:
709
+ input_shape = inputs_embeds.size()[:-1]
710
+ batch_size, seq_length = input_shape
711
+ device = inputs_embeds.device
712
+ elif encoder_embeds is not None:
713
+ input_shape = encoder_embeds.size()[:-1]
714
+ batch_size, seq_length = input_shape
715
+ device = encoder_embeds.device
716
+ else:
717
+ raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
718
+
719
+ # past_key_values_length
720
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
721
+
722
+ if attention_mask is None:
723
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
724
+
725
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
726
+ # ourselves in which case we just need to make it broadcastable to all heads.
727
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
728
+ device, is_decoder)
729
+
730
+ # If a 2D or 3D attention mask is provided for the cross-attention
731
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
732
+ if encoder_hidden_states is not None:
733
+ if type(encoder_hidden_states) == list:
734
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
735
+ else:
736
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
737
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
738
+
739
+ if type(encoder_attention_mask) == list:
740
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
741
+ elif encoder_attention_mask is None:
742
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
743
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
744
+ else:
745
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
746
+ else:
747
+ encoder_extended_attention_mask = None
748
+
749
+ # Prepare head mask if needed
750
+ # 1.0 in head_mask indicate we keep the head
751
+ # attention_probs has shape bsz x n_heads x N x N
752
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
753
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
754
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
755
+
756
+ if encoder_embeds is None:
757
+ embedding_output = self.embeddings(
758
+ input_ids=input_ids,
759
+ position_ids=position_ids,
760
+ inputs_embeds=inputs_embeds,
761
+ past_key_values_length=past_key_values_length,
762
+ )
763
+ else:
764
+ embedding_output = encoder_embeds
765
+
766
+ encoder_outputs = self.encoder(
767
+ embedding_output,
768
+ attention_mask=extended_attention_mask,
769
+ head_mask=head_mask,
770
+ encoder_hidden_states=encoder_hidden_states,
771
+ encoder_attention_mask=encoder_extended_attention_mask,
772
+ past_key_values=past_key_values,
773
+ use_cache=use_cache,
774
+ output_attentions=output_attentions,
775
+ output_hidden_states=output_hidden_states,
776
+ return_dict=return_dict,
777
+ mode=mode,
778
+ )
779
+ sequence_output = encoder_outputs[0]
780
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
781
+
782
+ if not return_dict:
783
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
784
+
785
+ return BaseModelOutputWithPoolingAndCrossAttentions(
786
+ last_hidden_state=sequence_output,
787
+ pooler_output=pooled_output,
788
+ past_key_values=encoder_outputs.past_key_values,
789
+ hidden_states=encoder_outputs.hidden_states,
790
+ attentions=encoder_outputs.attentions,
791
+ cross_attentions=encoder_outputs.cross_attentions,
792
+ )
793
+
794
+
795
+
796
+ class BertLMHeadModel(BertPreTrainedModel):
797
+
798
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
799
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
800
+
801
+ def __init__(self, config):
802
+ super().__init__(config)
803
+
804
+ self.bert = BertModel(config, add_pooling_layer=False)
805
+ self.cls = BertOnlyMLMHead(config)
806
+
807
+ self.init_weights()
808
+
809
+ def get_output_embeddings(self):
810
+ return self.cls.predictions.decoder
811
+
812
+ def set_output_embeddings(self, new_embeddings):
813
+ self.cls.predictions.decoder = new_embeddings
814
+
815
+ def forward(
816
+ self,
817
+ input_ids=None,
818
+ attention_mask=None,
819
+ position_ids=None,
820
+ head_mask=None,
821
+ inputs_embeds=None,
822
+ encoder_hidden_states=None,
823
+ encoder_attention_mask=None,
824
+ labels=None,
825
+ past_key_values=None,
826
+ use_cache=None,
827
+ output_attentions=None,
828
+ output_hidden_states=None,
829
+ return_dict=None,
830
+ return_logits=False,
831
+ is_decoder=True,
832
+ reduction='mean',
833
+ mode='multimodal',
834
+ ):
835
+ r"""
836
+ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
837
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
838
+ the model is configured as a decoder.
839
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
840
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
841
+ the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
842
+ - 1 for tokens that are **not masked**,
843
+ - 0 for tokens that are **masked**.
844
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
845
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
846
+ ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
847
+ ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
848
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
849
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
850
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
851
+ (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
852
+ instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
853
+ use_cache (:obj:`bool`, `optional`):
854
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
855
+ decoding (see :obj:`past_key_values`).
856
+ Returns:
857
+ Example::
858
+ >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
859
+ >>> import torch
860
+ >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
861
+ >>> config = BertConfig.from_pretrained("bert-base-cased")
862
+ >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
863
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
864
+ >>> outputs = model(**inputs)
865
+ >>> prediction_logits = outputs.logits
866
+ """
867
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
868
+ if labels is not None:
869
+ use_cache = False
870
+
871
+ outputs = self.bert(
872
+ input_ids,
873
+ attention_mask=attention_mask,
874
+ position_ids=position_ids,
875
+ head_mask=head_mask,
876
+ inputs_embeds=inputs_embeds,
877
+ encoder_hidden_states=encoder_hidden_states,
878
+ encoder_attention_mask=encoder_attention_mask,
879
+ past_key_values=past_key_values,
880
+ use_cache=use_cache,
881
+ output_attentions=output_attentions,
882
+ output_hidden_states=output_hidden_states,
883
+ return_dict=return_dict,
884
+ is_decoder=is_decoder,
885
+ mode=mode,
886
+ )
887
+
888
+ sequence_output = outputs[0]
889
+ prediction_scores = self.cls(sequence_output)
890
+
891
+ if return_logits:
892
+ return prediction_scores[:, :-1, :].contiguous()
893
+
894
+ lm_loss = None
895
+ if labels is not None:
896
+ # we are doing next-token prediction; shift prediction scores and input ids by one
897
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
898
+ labels = labels[:, 1:].contiguous()
899
+ loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
900
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
901
+ if reduction=='none':
902
+ lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1)
903
+
904
+ if not return_dict:
905
+ output = (prediction_scores,) + outputs[2:]
906
+ return ((lm_loss,) + output) if lm_loss is not None else output
907
+
908
+ return CausalLMOutputWithCrossAttentions(
909
+ loss=lm_loss,
910
+ logits=prediction_scores,
911
+ past_key_values=outputs.past_key_values,
912
+ hidden_states=outputs.hidden_states,
913
+ attentions=outputs.attentions,
914
+ cross_attentions=outputs.cross_attentions,
915
+ )
916
+
917
+ def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
918
+ input_shape = input_ids.shape
919
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
920
+ if attention_mask is None:
921
+ attention_mask = input_ids.new_ones(input_shape)
922
+
923
+ # cut decoder_input_ids if past is used
924
+ if past is not None:
925
+ input_ids = input_ids[:, -1:]
926
+
927
+ return {
928
+ "input_ids": input_ids,
929
+ "attention_mask": attention_mask,
930
+ "past_key_values": past,
931
+ "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
932
+ "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
933
+ "is_decoder": True,
934
+ }
935
+
936
+ def _reorder_cache(self, past, beam_idx):
937
+ reordered_past = ()
938
+ for layer_past in past:
939
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
940
+ return reordered_past
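The BertLMHeadModel defined in med.py above adds cross-attention over visual features to the stock Hugging Face decoder (is_decoder=True, mode='multimodal'). The following is a minimal sketch, not taken from this repository, of driving it with random tensors standing in for CLIP image features; the config values and token ids are illustrative assumptions.

import torch
from Model.bert import BertLMHeadModel, BertConfig

# Small illustrative decoder config; the real hidden sizes come from the
# repository's own BERT configuration, not from this sketch.
config = BertConfig(
    vocab_size=30522,
    hidden_size=768,
    num_hidden_layers=2,
    num_attention_heads=12,
    is_decoder=True,
    add_cross_attention=True,
)
decoder = BertLMHeadModel(config).eval()

# Stand-in for CLIP visual features: (batch, num_image_tokens, hidden_size).
image_embeds = torch.randn(1, 50, config.hidden_size)
image_atts = torch.ones(image_embeds.shape[:-1], dtype=torch.long)

# Illustrative partial caption token ids ([CLS] followed by two tokens).
input_ids = torch.tensor([[101, 2023, 2003]])

with torch.no_grad():
    out = decoder(
        input_ids,
        encoder_hidden_states=image_embeds,
        encoder_attention_mask=image_atts,
        is_decoder=True,
        mode="multimodal",
    )

print(out.logits.shape)  # torch.Size([1, 3, 30522]) -> next-token scores per position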
Model/clip/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .clip import *
Model/clip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (174 Bytes)
 
Model/clip/__pycache__/clip.cpython-310.pyc ADDED
Binary file (8.82 kB)
 
Model/clip/__pycache__/model.cpython-310.pyc ADDED
Binary file (15.1 kB)
 
Model/clip/__pycache__/simple_tokenizer.cpython-310.pyc ADDED
Binary file (5.7 kB)
 
Model/clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
3
+ size 1356917
Model/clip/clip.py ADDED
@@ -0,0 +1,237 @@
1
+ import hashlib
2
+ import os
3
+ import urllib
4
+ import warnings
5
+ from typing import Any, Union, List
6
+ from pkg_resources import packaging
7
+
8
+ import torch
9
+ from PIL import Image
10
+ from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
11
+ from tqdm import tqdm
12
+
13
+ from .model import build_model
14
+ from .simple_tokenizer import SimpleTokenizer as _Tokenizer
15
+
16
+ try:
17
+ from torchvision.transforms import InterpolationMode
18
+ BICUBIC = InterpolationMode.BICUBIC
19
+ except ImportError:
20
+ BICUBIC = Image.BICUBIC
21
+
22
+
23
+ if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
24
+ warnings.warn("PyTorch version 1.7.1 or higher is recommended")
25
+
26
+
27
+ __all__ = ["available_models", "load", "tokenize"]
28
+ _tokenizer = _Tokenizer()
29
+
30
+ _MODELS = {
31
+ "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
32
+ "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
33
+ "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
34
+ "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
35
+ "RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
36
+ "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
37
+ "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
38
+ "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
39
+ "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
40
+ }
41
+
42
+
43
+ def _download(url: str, root: str):
44
+ os.makedirs(root, exist_ok=True)
45
+ filename = os.path.basename(url)
46
+
47
+ expected_sha256 = url.split("/")[-2]
48
+ download_target = os.path.join(root, filename)
49
+
50
+ if os.path.exists(download_target) and not os.path.isfile(download_target):
51
+ raise RuntimeError(f"{download_target} exists and is not a regular file")
52
+
53
+ if os.path.isfile(download_target):
54
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
55
+ return download_target
56
+ else:
57
+ warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
58
+
59
+ with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
60
+ with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
61
+ while True:
62
+ buffer = source.read(8192)
63
+ if not buffer:
64
+ break
65
+
66
+ output.write(buffer)
67
+ loop.update(len(buffer))
68
+
69
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
70
+ raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not match")
71
+
72
+ return download_target
73
+
74
+
75
+ def _convert_image_to_rgb(image):
76
+ return image.convert("RGB")
77
+
78
+
79
+ def _transform(n_px):
80
+ return Compose([
81
+ Resize(n_px, interpolation=BICUBIC),
82
+ CenterCrop(n_px),
83
+ _convert_image_to_rgb,
84
+ ToTensor(),
85
+ Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
86
+ ])
87
+
88
+
89
+ def available_models() -> List[str]:
90
+ """Returns the names of available CLIP models"""
91
+ return list(_MODELS.keys())
92
+
93
+
94
+ def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
95
+ """Load a CLIP model
96
+
97
+ Parameters
98
+ ----------
99
+ name : str
100
+ A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
101
+
102
+ device : Union[str, torch.device]
103
+ The device to put the loaded model
104
+
105
+ jit : bool
106
+ Whether to load the optimized JIT model or more hackable non-JIT model (default).
107
+
108
+ download_root: str
109
+ path to download the model files; by default, it uses "~/.cache/clip"
110
+
111
+ Returns
112
+ -------
113
+ model : torch.nn.Module
114
+ The CLIP model
115
+
116
+ preprocess : Callable[[PIL.Image], torch.Tensor]
117
+ A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
118
+ """
119
+ if name in _MODELS:
120
+ model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
121
+ elif os.path.isfile(name):
122
+ model_path = name
123
+ else:
124
+ raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
125
+
126
+ with open(model_path, 'rb') as opened_file:
127
+ try:
128
+ # loading JIT archive
129
+ model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
130
+ state_dict = None
131
+ except RuntimeError:
132
+ # loading saved state dict
133
+ if jit:
134
+ warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
135
+ jit = False
136
+ state_dict = torch.load(opened_file, map_location="cpu")
137
+
138
+ if not jit:
139
+ model = build_model(state_dict or model.state_dict()).to(device)
140
+ if str(device) == "cpu":
141
+ model.float()
142
+ return model, _transform(model.visual.input_resolution)
143
+
144
+ # patch the device names
145
+ device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
146
+ device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
147
+
148
+ def patch_device(module):
149
+ try:
150
+ graphs = [module.graph] if hasattr(module, "graph") else []
151
+ except RuntimeError:
152
+ graphs = []
153
+
154
+ if hasattr(module, "forward1"):
155
+ graphs.append(module.forward1.graph)
156
+
157
+ for graph in graphs:
158
+ for node in graph.findAllNodes("prim::Constant"):
159
+ if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
160
+ node.copyAttributes(device_node)
161
+
162
+ model.apply(patch_device)
163
+ patch_device(model.encode_image)
164
+ patch_device(model.encode_text)
165
+
166
+ # patch dtype to float32 on CPU
167
+ if str(device) == "cpu":
168
+ float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
169
+ float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
170
+ float_node = float_input.node()
171
+
172
+ def patch_float(module):
173
+ try:
174
+ graphs = [module.graph] if hasattr(module, "graph") else []
175
+ except RuntimeError:
176
+ graphs = []
177
+
178
+ if hasattr(module, "forward1"):
179
+ graphs.append(module.forward1.graph)
180
+
181
+ for graph in graphs:
182
+ for node in graph.findAllNodes("aten::to"):
183
+ inputs = list(node.inputs())
184
+ for i in [1, 2]: # dtype can be the second or third argument to aten::to()
185
+ if inputs[i].node()["value"] == 5:
186
+ inputs[i].node().copyAttributes(float_node)
187
+
188
+ model.apply(patch_float)
189
+ patch_float(model.encode_image)
190
+ patch_float(model.encode_text)
191
+
192
+ model.float()
193
+
194
+ return model, _transform(model.input_resolution.item())
195
+
196
+
197
+ def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> Union[torch.IntTensor, torch.LongTensor]:
198
+ """
199
+ Returns the tokenized representation of given input string(s)
200
+
201
+ Parameters
202
+ ----------
203
+ texts : Union[str, List[str]]
204
+ An input string or a list of input strings to tokenize
205
+
206
+ context_length : int
207
+ The context length to use; all CLIP models use 77 as the context length
208
+
209
+ truncate: bool
210
+ Whether to truncate the text in case its encoding is longer than the context length
211
+
212
+ Returns
213
+ -------
214
+ A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
215
+ We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
216
+ """
217
+ if isinstance(texts, str):
218
+ texts = [texts]
219
+
220
+ sot_token = _tokenizer.encoder["<|startoftext|>"]
221
+ eot_token = _tokenizer.encoder["<|endoftext|>"]
222
+ all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
223
+ if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
224
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
225
+ else:
226
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)
227
+
228
+ for i, tokens in enumerate(all_tokens):
229
+ if len(tokens) > context_length:
230
+ if truncate:
231
+ tokens = tokens[:context_length]
232
+ tokens[-1] = eot_token
233
+ else:
234
+ raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
235
+ result[i, :len(tokens)] = torch.tensor(tokens)
236
+
237
+ return result
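The load() and tokenize() helpers above mirror the reference OpenAI CLIP API. A minimal usage sketch follows, assuming the CLIP class in model.py matches the reference implementation (encode_image, encode_text, and a forward pass returning image/text logits); the model name and demo.jpg path are illustrative.

import torch
from PIL import Image
from Model.clip import available_models, load, tokenize

device = "cuda" if torch.cuda.is_available() else "cpu"
print(available_models())                             # names accepted by load()

model, preprocess = load("ViT-L/14", device=device)   # cached under ~/.cache/clip on first use

image = preprocess(Image.open("demo.jpg")).unsqueeze(0).to(device)  # demo.jpg is a placeholder path
text = tokenize(["a dog", "a cat"]).to(device)

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    logits_per_image, _ = model(image, text)
    probs = logits_per_image.softmax(dim=-1)

print(probs)  # similarity of the image to each text prompt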
Model/clip/model.py ADDED
@@ -0,0 +1,437 @@
1
+ from collections import OrderedDict
2
+ from typing import Tuple, Union
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from torch import nn
+
+
+ class Bottleneck(nn.Module):
+     expansion = 4
+
+     def __init__(self, inplanes, planes, stride=1):
+         super().__init__()
+
+         # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
+         self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
+         self.bn1 = nn.BatchNorm2d(planes)
+         self.relu1 = nn.ReLU(inplace=True)
+
+         self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
+         self.bn2 = nn.BatchNorm2d(planes)
+         self.relu2 = nn.ReLU(inplace=True)
+
+         self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
+
+         self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
+         self.bn3 = nn.BatchNorm2d(planes * self.expansion)
+         self.relu3 = nn.ReLU(inplace=True)
+
+         self.downsample = None
+         self.stride = stride
+
+         if stride > 1 or inplanes != planes * Bottleneck.expansion:
+             # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
+             self.downsample = nn.Sequential(OrderedDict([
+                 ("-1", nn.AvgPool2d(stride)),
+                 ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
+                 ("1", nn.BatchNorm2d(planes * self.expansion))
+             ]))
+
+     def forward(self, x: torch.Tensor):
+         identity = x
+
+         out = self.relu1(self.bn1(self.conv1(x)))
+         out = self.relu2(self.bn2(self.conv2(out)))
+         out = self.avgpool(out)
+         out = self.bn3(self.conv3(out))
+
+         if self.downsample is not None:
+             identity = self.downsample(x)
+
+         out += identity
+         out = self.relu3(out)
+         return out
+
+
+ class AttentionPool2d(nn.Module):
+     def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
+         super().__init__()
+         self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
+         self.k_proj = nn.Linear(embed_dim, embed_dim)
+         self.q_proj = nn.Linear(embed_dim, embed_dim)
+         self.v_proj = nn.Linear(embed_dim, embed_dim)
+         self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
+         self.num_heads = num_heads
+
+     def forward(self, x):
+         x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
+         x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
+         x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
+         x, _ = F.multi_head_attention_forward(
+             query=x, key=x, value=x,
+             embed_dim_to_check=x.shape[-1],
+             num_heads=self.num_heads,
+             q_proj_weight=self.q_proj.weight,
+             k_proj_weight=self.k_proj.weight,
+             v_proj_weight=self.v_proj.weight,
+             in_proj_weight=None,
+             in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
+             bias_k=None,
+             bias_v=None,
+             add_zero_attn=False,
+             dropout_p=0,
+             out_proj_weight=self.c_proj.weight,
+             out_proj_bias=self.c_proj.bias,
+             use_separate_proj_weight=True,
+             training=self.training,
+             need_weights=False
+         )
+
+         return x[0]
+
+
+ class ModifiedResNet(nn.Module):
+     """
+     A ResNet class that is similar to torchvision's but contains the following changes:
+     - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
+     - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
+     - The final pooling layer is a QKV attention instead of an average pool
+     """
+
+     def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
+         super().__init__()
+         self.output_dim = output_dim
+         self.input_resolution = input_resolution
+
+         # the 3-layer stem
+         self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
+         self.bn1 = nn.BatchNorm2d(width // 2)
+         self.relu1 = nn.ReLU(inplace=True)
+         self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
+         self.bn2 = nn.BatchNorm2d(width // 2)
+         self.relu2 = nn.ReLU(inplace=True)
+         self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
+         self.bn3 = nn.BatchNorm2d(width)
+         self.relu3 = nn.ReLU(inplace=True)
+         self.avgpool = nn.AvgPool2d(2)
+
+         # residual layers
+         self._inplanes = width  # this is a *mutable* variable used during construction
+         self.layer1 = self._make_layer(width, layers[0])
+         self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
+         self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
+         self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
+
+         embed_dim = width * 32  # the ResNet feature dimension
+         self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
+
+     def _make_layer(self, planes, blocks, stride=1):
+         layers = [Bottleneck(self._inplanes, planes, stride)]
+
+         self._inplanes = planes * Bottleneck.expansion
+         for _ in range(1, blocks):
+             layers.append(Bottleneck(self._inplanes, planes))
+
+         return nn.Sequential(*layers)
+
+     def forward(self, x):
+         def stem(x):
+             x = self.relu1(self.bn1(self.conv1(x)))
+             x = self.relu2(self.bn2(self.conv2(x)))
+             x = self.relu3(self.bn3(self.conv3(x)))
+             x = self.avgpool(x)
+             return x
+
+         x = x.type(self.conv1.weight.dtype)
+         x = stem(x)
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = self.layer4(x)
+         x = self.attnpool(x)
+
+         return x
+
+
+ class LayerNorm(nn.LayerNorm):
+     """Subclass torch's LayerNorm to handle fp16."""
+
+     def forward(self, x: torch.Tensor):
+         orig_type = x.dtype
+         ret = super().forward(x.type(torch.float32))
+         return ret.type(orig_type)
+
+
+ class QuickGELU(nn.Module):
+     def forward(self, x: torch.Tensor):
+         return x * torch.sigmoid(1.702 * x)
+
+
+ class ResidualAttentionBlock(nn.Module):
+     def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
+         super().__init__()
+
+         self.attn = nn.MultiheadAttention(d_model, n_head)
+         self.ln_1 = LayerNorm(d_model)
+         self.mlp = nn.Sequential(OrderedDict([
+             ("c_fc", nn.Linear(d_model, d_model * 4)),
+             ("gelu", QuickGELU()),
+             ("c_proj", nn.Linear(d_model * 4, d_model))
+         ]))
+         self.ln_2 = LayerNorm(d_model)
+         self.attn_mask = attn_mask
+
+     def attention(self, x: torch.Tensor):
+         self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
+         return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
+
+     def forward(self, x: torch.Tensor):
+         x = x + self.attention(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ class Transformer(nn.Module):
+     def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
+         super().__init__()
+         self.width = width
+         self.layers = layers
+         self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
+
+     def forward(self, x: torch.Tensor):
+         return self.resblocks(x)
+
+
+ class VisionTransformer(nn.Module):
+     def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
+         super().__init__()
+         self.input_resolution = input_resolution
+         self.output_dim = output_dim
+         self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
+
+         scale = width ** -0.5
+         self.class_embedding = nn.Parameter(scale * torch.randn(width))
+         self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
+         self.ln_pre = LayerNorm(width)
+
+         self.transformer = Transformer(width, layers, heads)
+
+         self.ln_post = LayerNorm(width)
+         self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
+
+     def forward(self, x: torch.Tensor):
+         x = self.conv1(x)  # shape = [*, width, grid, grid]
+         x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
+         x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
+         x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
+         x = x + self.positional_embedding.to(x.dtype)
+         x = self.ln_pre(x)
+
+         x = x.permute(1, 0, 2)  # NLD -> LND
+         x = self.transformer(x)
+         x = x.permute(1, 0, 2)  # LND -> NLD
+
+         # x = self.ln_post(x[:, 0, :])
+         #
+         # if self.proj is not None:
+         #     x = x @ self.proj
+
+         return x
+
+
+ class CLIP(nn.Module):
+     def __init__(self,
+                  embed_dim: int,
+                  # vision
+                  image_resolution: int,
+                  vision_layers: Union[Tuple[int, int, int, int], int],
+                  vision_width: int,
+                  vision_patch_size: int,
+                  # text
+                  context_length: int,
+                  vocab_size: int,
+                  transformer_width: int,
+                  transformer_heads: int,
+                  transformer_layers: int
+                  ):
+         super().__init__()
+
+         self.context_length = context_length
+
+         if isinstance(vision_layers, (tuple, list)):
+             vision_heads = vision_width * 32 // 64
+             self.visual = ModifiedResNet(
+                 layers=vision_layers,
+                 output_dim=embed_dim,
+                 heads=vision_heads,
+                 input_resolution=image_resolution,
+                 width=vision_width
+             )
+         else:
+             vision_heads = vision_width // 64
+             self.visual = VisionTransformer(
+                 input_resolution=image_resolution,
+                 patch_size=vision_patch_size,
+                 width=vision_width,
+                 layers=vision_layers,
+                 heads=vision_heads,
+                 output_dim=embed_dim
+             )
+
+         self.transformer = Transformer(
+             width=transformer_width,
+             layers=transformer_layers,
+             heads=transformer_heads,
+             attn_mask=self.build_attention_mask()
+         )
+
+         self.vocab_size = vocab_size
+         self.token_embedding = nn.Embedding(vocab_size, transformer_width)
+         self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
+         self.ln_final = LayerNorm(transformer_width)
+
+         self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
+         self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
+
+         self.initialize_parameters()
+
+     def initialize_parameters(self):
+         nn.init.normal_(self.token_embedding.weight, std=0.02)
+         nn.init.normal_(self.positional_embedding, std=0.01)
+
+         if isinstance(self.visual, ModifiedResNet):
+             if self.visual.attnpool is not None:
+                 std = self.visual.attnpool.c_proj.in_features ** -0.5
+                 nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
+                 nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
+                 nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
+                 nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
+
+             for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
+                 for name, param in resnet_block.named_parameters():
+                     if name.endswith("bn3.weight"):
+                         nn.init.zeros_(param)
+
+         proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
+         attn_std = self.transformer.width ** -0.5
+         fc_std = (2 * self.transformer.width) ** -0.5
+         for block in self.transformer.resblocks:
+             nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
+             nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
+             nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
+             nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
+
+         if self.text_projection is not None:
+             nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
+
+     def build_attention_mask(self):
+         # lazily create causal attention mask, with full attention between the vision tokens
+         # pytorch uses additive attention mask; fill with -inf
+         mask = torch.empty(self.context_length, self.context_length)
+         mask.fill_(float("-inf"))
+         mask.triu_(1)  # zero out the lower diagonal
+         return mask
+
+     @property
+     def dtype(self):
+         return self.visual.conv1.weight.dtype
+
+     def encode_image(self, image):
+         return self.visual(image.type(self.dtype))
+
+     def encode_text(self, text):
+         x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]
+
+         x = x + self.positional_embedding.type(self.dtype)
+         x = x.permute(1, 0, 2)  # NLD -> LND
+         x = self.transformer(x)
+         x = x.permute(1, 0, 2)  # LND -> NLD
+         x = self.ln_final(x).type(self.dtype)
+
+         # x.shape = [batch_size, n_ctx, transformer.width]
+         # take features from the eot embedding (eot_token is the highest number in each sequence)
+         x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
+
+         return x
+
+     def forward(self, image, text):
+         image_features = self.encode_image(image)
+         text_features = self.encode_text(text)
+
+         # normalized features
+         image_features = image_features / image_features.norm(dim=1, keepdim=True)
+         text_features = text_features / text_features.norm(dim=1, keepdim=True)
+
+         # cosine similarity as logits
+         logit_scale = self.logit_scale.exp()
+         logits_per_image = logit_scale * image_features @ text_features.t()
+         logits_per_text = logits_per_image.t()
+
+         # shape = [global_batch_size, global_batch_size]
+         return logits_per_image, logits_per_text
+
+
+ def convert_weights(model: nn.Module):
+     """Convert applicable model parameters to fp16"""
+
+     def _convert_weights_to_fp16(l):
+         if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
+             l.weight.data = l.weight.data.half()
+             if l.bias is not None:
+                 l.bias.data = l.bias.data.half()
+
+         if isinstance(l, nn.MultiheadAttention):
+             for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
+                 tensor = getattr(l, attr)
+                 if tensor is not None:
+                     tensor.data = tensor.data.half()
+
+         for name in ["text_projection", "proj"]:
+             if hasattr(l, name):
+                 attr = getattr(l, name)
+                 if attr is not None:
+                     attr.data = attr.data.half()
+
+     model.apply(_convert_weights_to_fp16)
+
+
+ def build_model(state_dict: dict):
+     vit = "visual.proj" in state_dict
+
+     if vit:
+         vision_width = state_dict["visual.conv1.weight"].shape[0]
+         vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
+         vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
+         grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
+         image_resolution = vision_patch_size * grid_size
+     else:
+         counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
+         vision_layers = tuple(counts)
+         vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
+         output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
+         vision_patch_size = None
+         assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
+         image_resolution = output_width * 32
+
+     embed_dim = state_dict["text_projection"].shape[1]
+     context_length = state_dict["positional_embedding"].shape[0]
+     vocab_size = state_dict["token_embedding.weight"].shape[0]
+     transformer_width = state_dict["ln_final.weight"].shape[0]
+     transformer_heads = transformer_width // 64
+     transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
+
+     model = CLIP(
+         embed_dim,
+         image_resolution, vision_layers, vision_width, vision_patch_size,
+         context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
+     )
+
+     for key in ["input_resolution", "context_length", "vocab_size"]:
+         if key in state_dict:
+             del state_dict[key]
+
+     convert_weights(model)
+     model.load_state_dict(state_dict)
+     return model.eval()
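
Note that `VisionTransformer.forward` above returns the full patch-token sequence (the final `ln_post`/`proj` step is commented out), so `encode_image` yields per-token features rather than a single pooled CLIP embedding; TRCaptionNet's BERT decoder later cross-attends over these tokens. A minimal usage sketch for `build_model`, assuming an OpenAI CLIP TorchScript checkpoint has already been downloaded locally (the path `ViT-B-32.pt` is a placeholder, not a file in this repo):

```python
# Sketch: build a CLIP model from a downloaded OpenAI TorchScript checkpoint.
import torch

state_dict = torch.jit.load("ViT-B-32.pt", map_location="cpu").state_dict()  # placeholder path
clip_model = build_model(state_dict).float()  # build_model casts to fp16; cast back for CPU use

with torch.no_grad():
    tokens = clip_model.encode_image(torch.zeros(1, 3, 224, 224))

# Because ln_post/proj are skipped in VisionTransformer.forward, the output is the
# full token sequence, e.g. roughly (1, 50, 768) for ViT-B/32 at 224x224 (49 patches + CLS).
print(tokens.shape)
```

In practice `clip.load`, which `TRCaptionNet` calls below, wraps essentially this download-and-build step.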
Model/clip/simple_tokenizer.py ADDED
@@ -0,0 +1,132 @@
+ import gzip
+ import html
+ import os
+ from functools import lru_cache
+
+ import ftfy
+ import regex as re
+
+
+ @lru_cache()
+ def default_bpe():
+     return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
+
+
+ @lru_cache()
+ def bytes_to_unicode():
+     """
+     Returns list of utf-8 byte and a corresponding list of unicode strings.
+     The reversible bpe codes work on unicode strings.
+     This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+     When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+     This is a signficant percentage of your normal, say, 32K bpe vocab.
+     To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+     And avoids mapping to whitespace/control characters the bpe code barfs on.
+     """
+     bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
+     cs = bs[:]
+     n = 0
+     for b in range(2**8):
+         if b not in bs:
+             bs.append(b)
+             cs.append(2**8+n)
+             n += 1
+     cs = [chr(n) for n in cs]
+     return dict(zip(bs, cs))
+
+
+ def get_pairs(word):
+     """Return set of symbol pairs in a word.
+     Word is represented as tuple of symbols (symbols being variable-length strings).
+     """
+     pairs = set()
+     prev_char = word[0]
+     for char in word[1:]:
+         pairs.add((prev_char, char))
+         prev_char = char
+     return pairs
+
+
+ def basic_clean(text):
+     text = ftfy.fix_text(text)
+     text = html.unescape(html.unescape(text))
+     return text.strip()
+
+
+ def whitespace_clean(text):
+     text = re.sub(r'\s+', ' ', text)
+     text = text.strip()
+     return text
+
+
+ class SimpleTokenizer(object):
+     def __init__(self, bpe_path: str = default_bpe()):
+         self.byte_encoder = bytes_to_unicode()
+         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+         merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
+         merges = merges[1:49152-256-2+1]
+         merges = [tuple(merge.split()) for merge in merges]
+         vocab = list(bytes_to_unicode().values())
+         vocab = vocab + [v+'</w>' for v in vocab]
+         for merge in merges:
+             vocab.append(''.join(merge))
+         vocab.extend(['<|startoftext|>', '<|endoftext|>'])
+         self.encoder = dict(zip(vocab, range(len(vocab))))
+         self.decoder = {v: k for k, v in self.encoder.items()}
+         self.bpe_ranks = dict(zip(merges, range(len(merges))))
+         self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
+         self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
+
+     def bpe(self, token):
+         if token in self.cache:
+             return self.cache[token]
+         word = tuple(token[:-1]) + ( token[-1] + '</w>',)
+         pairs = get_pairs(word)
+
+         if not pairs:
+             return token+'</w>'
+
+         while True:
+             bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
+             if bigram not in self.bpe_ranks:
+                 break
+             first, second = bigram
+             new_word = []
+             i = 0
+             while i < len(word):
+                 try:
+                     j = word.index(first, i)
+                     new_word.extend(word[i:j])
+                     i = j
+                 except:
+                     new_word.extend(word[i:])
+                     break
+
+                 if word[i] == first and i < len(word)-1 and word[i+1] == second:
+                     new_word.append(first+second)
+                     i += 2
+                 else:
+                     new_word.append(word[i])
+                     i += 1
+             new_word = tuple(new_word)
+             word = new_word
+             if len(word) == 1:
+                 break
+             else:
+                 pairs = get_pairs(word)
+         word = ' '.join(word)
+         self.cache[token] = word
+         return word
+
+     def encode(self, text):
+         bpe_tokens = []
+         text = whitespace_clean(basic_clean(text)).lower()
+         for token in re.findall(self.pat, text):
+             token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
+             bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
+         return bpe_tokens
+
+     def decode(self, tokens):
+         text = ''.join([self.decoder[token] for token in tokens])
+         text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
+         return text
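
For reference, a short sketch of how `SimpleTokenizer` round-trips text, assuming the bundled `bpe_simple_vocab_16e6.txt.gz` sits next to this module as `default_bpe()` expects:

```python
# Quick round-trip sketch with the BPE tokenizer defined above.
tokenizer = SimpleTokenizer()            # loads bpe_simple_vocab_16e6.txt.gz via default_bpe()

ids = tokenizer.encode("a dog running on the beach")
print(ids)                               # BPE token ids; no SOT/EOT tokens are added here
print(tokenizer.decode(ids))             # decoding maps '</w>' markers back to spaces
```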
Model/trcaptionnet.py ADDED
@@ -0,0 +1,107 @@
+ import os
+ import numpy
+
+ import torch
+ from torch import nn
+ from PIL import Image
+ from transformers import BertTokenizer
+
+ from Model import clip
+ from Model.bert import BertLMHeadModel, BertConfig
+ from Model.clip.model import Transformer
+
+
+ class Proj(nn.Module):
+
+     def __init__(self, encoder_output_size, num_head=16):
+         super().__init__()
+         self.encoder_output_size = encoder_output_size
+
+         self.transformer = Transformer(encoder_output_size, 1, num_head)
+         self.linear = nn.Linear(encoder_output_size, 768)
+         return
+
+     def forward(self, x):
+         x = x.permute(1, 0, 2)  # NLD -> LND
+         x = self.transformer(x)
+         x = x.permute(1, 0, 2)  # LND -> NLD
+         return self.linear(x)
+
+
+ class TRCaptionNet(nn.Module):
+     def __init__(self, config: dict):
+         super().__init__()
+         # parameters
+         self.max_length = config["max_length"]
+         self.proj_flag = config["proj"]
+         assert type(self.proj_flag) == bool
+         self.proj_num_head = config["proj_num_head"]
+
+         # vision encoder
+         self.vision_encoder, preprocess = clip.load(config["clip"], jit=False)
+         self.vision_encoder.eval()
+         self.vision_encoder = self.vision_encoder.visual.float()
+         with torch.no_grad():
+             dummy_input_image = preprocess(Image.fromarray(numpy.zeros((512, 512, 3), dtype=numpy.uint8))).to(next(self.parameters()).device)
+             encoder_output_size = self.vision_encoder(dummy_input_image.unsqueeze(0)).shape[-1]
+
+         # language decoder
+         if not os.path.isfile(config["bert"]):
+             self.language_decoder = BertLMHeadModel.from_pretrained(config["bert"],
+                                                                     is_decoder=True,
+                                                                     add_cross_attention=True)
+             self.tokenizer = BertTokenizer.from_pretrained(config["bert"])
+         else:
+             med_config = BertConfig.from_json_file(config["bert"])
+             self.language_decoder = BertLMHeadModel(config=med_config)
+             self.tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-turkish-cased")
+
+         # proj
+         if self.proj_flag:
+             if self.proj_num_head is None:
+                 self.proj = nn.Linear(encoder_output_size, 768)
+             else:
+                 self.proj = Proj(encoder_output_size, self.proj_num_head)
+         else:
+             self.proj = None
+         return
+
+     @torch.no_grad()
+     def generate(self, images, max_length: int = None, min_length: int = 12, num_beams: int = 3,
+                  repetition_penalty: float = 1.1):
+         image_embeds = self.vision_encoder(images)
+
+         if self.proj is not None:
+             image_embeds = self.proj(image_embeds)
+
+         image_atts = torch.ones(image_embeds.shape[:-1], dtype=torch.long).to(images.device)
+         model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask": image_atts}
+
+         input_ids = torch.ones((image_embeds.shape[0], 1), device=images.device, dtype=torch.long)
+         input_ids *= 2
+
+         outputs = self.language_decoder.generate(input_ids=input_ids,
+                                                  max_length=self.max_length if max_length is None else max_length,
+                                                  min_length=min_length,
+                                                  num_beams=num_beams,
+                                                  eos_token_id=self.tokenizer.sep_token_id,
+                                                  pad_token_id=self.tokenizer.pad_token_id,
+                                                  repetition_penalty=repetition_penalty,
+                                                  **model_kwargs)
+
+         captions = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
+         return captions
+
+ def test():
+     model = TRCaptionNet({
+         "max_length": 35,
+         "clip": "ViT-B/32",
+         "bert": "dbmdz/bert-base-turkish-cased",
+         "proj": True,          # __init__ requires "proj" and "proj_num_head"; values mirror demo.py
+         "proj_num_head": 16
+     })
+
+     return
+
+
+ if __name__ == '__main__':
+     test()
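
One detail worth noting in `generate`: decoding is seeded with the hard-coded token id 2 and stops at `sep_token_id`. For `dbmdz/bert-base-turkish-cased` this id is expected to correspond to `[CLS]`, so captions are generated from `[CLS]` up to `[SEP]`; a quick check of that assumption:

```python
# Sanity-check sketch for the hard-coded start token id used in generate().
from transformers import BertTokenizer

tok = BertTokenizer.from_pretrained("dbmdz/bert-base-turkish-cased")
# Expected for BERTurk: cls=2, sep=3, pad=0 (values assumed here; verify against the vocab).
print(tok.cls_token_id, tok.sep_token_id, tok.pad_token_id)
```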
README.md CHANGED
@@ -1,13 +1,25 @@
  ---
- title: TRCaptionNet TasvirEt
- emoji:
- colorFrom: green
- colorTo: yellow
+ title: TRCaptionNet
+ emoji: 🖼
+ colorFrom: red
+ colorTo: indigo
  sdk: gradio
- sdk_version: 4.29.0
  app_file: app.py
- pinned: false
- license: mit
+ pinned: true
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Configuration
+ `title`: _string_
+ TRCaptionNet
+ `emoji`: _string_
+ 🖼
+ `colorFrom`: _string_
+ red
+ `colorTo`: _string_
+ indigo
+ `sdk`: _string_
+ gradio
+ `app_file`: _string_
+ app.py
+
+ `pinned`: _boolean_
+ true
app.py ADDED
@@ -0,0 +1,59 @@
+ import os.path
+
+ import gdown
+ import gradio as gr
+ import torch
+
+ from Model import TRCaptionNet, clip_transform
+
+ model_ckpt = "./checkpoints/TRCaptionNet_L14_berturk_tasviret.pth"
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ # device = "cpu"
+
+ preprocess = clip_transform(224)
+ model = TRCaptionNet({
+     "max_length": 35,
+     "clip": "ViT-L/14",
+     "bert": "dbmdz/bert-base-turkish-cased",
+     "proj": True,
+     "proj_num_head": 16
+ })
+ model.load_state_dict(torch.load(model_ckpt, map_location=device)["model"], strict=True)
+ model = model.to(device)
+ model.eval()
+
+
+ def inference(raw_image, min_length, repetition_penalty):
+     batch = preprocess(raw_image).unsqueeze(0).to(device)
+     caption = model.generate(batch, min_length=min_length, repetition_penalty=repetition_penalty)[0]
+     return caption
+
+
+ inputs = [gr.Image(type='pil', interactive=True,),
+           gr.Slider(minimum=6, maximum=22, value=11, label="MINIMUM CAPTION LENGTH", step=1),
+           gr.Slider(minimum=1, maximum=2, value=1.6, label="REPETITION PENALTY")]
+ outputs = gr.components.Textbox(label="Caption")
+ title = "TRCaptionNet"
+ paper_link = ""
+ github_link = "https://github.com/serdaryildiz/TRCaptionNet"
+ description = f"<p style='text-align: center'><a href='{github_link}' target='_blank'>TRCaptionNet</a> : A novel and accurate deep Turkish image captioning model with vision transformer based image encoders and deep linguistic text decoders"
+ examples = [
+     ["images/test1.jpg"],
+     ["images/test2.jpg"],
+     ["images/test3.jpg"],
+     ["images/test4.jpg"]
+ ]
+ article = f"<p style='text-align: center'><a href='{paper_link}' target='_blank'>Paper</a> | <a href='{github_link}' target='_blank'>Github Repo</a></p>"
+ css = ".output-image, .input-image, .image-preview {height: 600px !important}"
+
+ iface = gr.Interface(fn=inference,
+                      inputs=inputs,
+                      outputs=outputs,
+                      title=title,
+                      description=description,
+                      examples=examples,
+                      article=article,
+                      css=css)
+ iface.launch()
+
checkpoints/dummy.txt ADDED
File without changes
demo.py ADDED
@@ -0,0 +1,54 @@
+ import argparse
+ import glob
+ import os
+
+ import cv2
+ import numpy
+ import torch
+ from PIL import Image
+
+ from Model import TRCaptionNet, clip_transform
+
+
+ def demo(opt):
+     preprocess = clip_transform(224)
+     model = TRCaptionNet({
+         "max_length": 35,
+         "clip": "ViT-L/14",
+         "bert": "dbmdz/bert-base-turkish-cased",
+         "proj": True,
+         "proj_num_head": 16
+     })
+     device = torch.device(opt.device)
+     model.load_state_dict(torch.load(opt.model_ckpt, map_location=device)["model"], strict=True)
+     model = model.to(device)
+     model.eval()
+
+     image_paths = glob.glob(os.path.join(opt.input_dir, '*.jpg'))
+
+     for image_path in sorted(image_paths):
+         img_name = image_path.split('/')[-1]
+         img0 = Image.open(image_path)
+         batch = preprocess(img0).unsqueeze(0).to(device)
+         caption = model.generate(batch, min_length=11, repetition_penalty=1.6)[0]
+         print(f"{img_name} :", caption)
+
+         orj_img = numpy.array(img0)[:, :, ::-1]
+         h, w, _ = orj_img.shape
+         new_h = 800
+         new_w = int(new_h * (w / h))
+         orj_img = cv2.resize(orj_img, (new_w, new_h))
+
+         cv2.imshow("image", orj_img)
+         cv2.waitKey(0)
+
+     return
+
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser(description='Turkish-Image-Captioning!')
+     parser.add_argument('--model-ckpt', type=str, default='./checkpoints/TRCaptionNet_L14_berturk.pth')
+     parser.add_argument('--input-dir', type=str, default='./images/')
+     parser.add_argument('--device', type=str, default='cuda:0')
+     args = parser.parse_args()
+     demo(args)
images/dummy.txt ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ torch==2.0.0
+ torchvision==0.15.1
+ opencv-python==4.6.0.66
+ transformers==4.27.3
+ ftfy==6.1.1
+ gradio==3.48.0
+ gdown==4.6.0