erkam committed
Commit e58ffa5
1 Parent(s): a0d1c27

Upload modeling_clip.py

Files changed (1)
  1. modeling_clip.py +1324 -0
modeling_clip.py ADDED
@@ -0,0 +1,1324 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CLIP model."""
16
+
17
+
18
+ from dataclasses import dataclass
19
+ from typing import Any, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...utils import (
29
+ ModelOutput,
30
+ add_start_docstrings,
31
+ add_start_docstrings_to_model_forward,
32
+ logging,
33
+ replace_return_docstrings,
34
+ )
35
+ from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32"
41
+
42
+ CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
43
+ "openai/clip-vit-base-patch32",
44
+ # See all CLIP models at https://huggingface.co/models?filter=clip
45
+ ]
46
+
47
+
48
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
49
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
50
+ """
51
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
52
+ """
53
+ bsz, src_len = mask.size()
54
+ tgt_len = tgt_len if tgt_len is not None else src_len
55
+
56
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
57
+
58
+ inverted_mask = 1.0 - expanded_mask
59
+
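+ # keep 0.0 where attention is allowed and fill masked positions with the dtype's minimum, so they vanish after the softmax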
60
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
61
+
62
+
63
+ # contrastive loss function, adapted from
64
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
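+ # the target for row i is index i: matched image-text pairs lie on the diagonal of the similarity matrix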
65
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
66
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
67
+
68
+
69
+ def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
70
+ caption_loss = contrastive_loss(similarity)
71
+ image_loss = contrastive_loss(similarity.t())
72
+ return (caption_loss + image_loss) / 2.0
73
+
74
+
75
+ @dataclass
76
+ class CLIPVisionModelOutput(ModelOutput):
77
+ """
78
+ Base class for vision model outputs that also contain image embeddings obtained from the pooled last hidden states.
79
+
80
+ Args:
81
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
82
+ The image embeddings obtained by applying the projection layer to the pooler_output.
83
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
84
+ Sequence of hidden-states at the output of the last layer of the model.
85
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
86
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
87
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
88
+
89
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
90
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
91
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
92
+ sequence_length)`.
93
+
94
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
95
+ heads.
96
+ """
97
+
98
+ image_embeds: Optional[torch.FloatTensor] = None
99
+ last_hidden_state: torch.FloatTensor = None
100
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
101
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
102
+
103
+
104
+ @dataclass
105
+ class CLIPTextModelOutput(ModelOutput):
106
+ """
107
+ Base class for text model outputs that also contain a pooled representation of the last hidden states.
108
+
109
+ Args:
110
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
111
+ The text embeddings obtained by applying the projection layer to the pooler_output.
112
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
113
+ Sequence of hidden-states at the output of the last layer of the model.
114
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
115
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
116
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
117
+
118
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
119
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
120
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
121
+ sequence_length)`.
122
+
123
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
124
+ heads.
125
+ """
126
+
127
+ text_embeds: Optional[torch.FloatTensor] = None
128
+ last_hidden_state: torch.FloatTensor = None
129
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
130
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
131
+
132
+
133
+ @dataclass
134
+ class CLIPOutput(ModelOutput):
135
+ """
136
+ Args:
137
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
138
+ Contrastive loss for image-text similarity.
139
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
140
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
141
+ similarity scores.
142
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
143
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
144
+ similarity scores.
145
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
146
+ The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`].
147
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
148
+ The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`].
149
+ text_model_output (`BaseModelOutputWithPooling`):
150
+ The output of the [`CLIPTextModel`].
151
+ vision_model_output (`BaseModelOutputWithPooling`):
152
+ The output of the [`CLIPVisionModel`].
153
+ """
154
+
155
+ loss: Optional[torch.FloatTensor] = None
156
+ logits_per_image: torch.FloatTensor = None
157
+ logits_per_text: torch.FloatTensor = None
158
+ text_embeds: torch.FloatTensor = None
159
+ image_embeds: torch.FloatTensor = None
160
+ text_model_output: BaseModelOutputWithPooling = None
161
+ vision_model_output: BaseModelOutputWithPooling = None
162
+
163
+ def to_tuple(self) -> Tuple[Any]:
164
+ return tuple(
165
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
166
+ for k in self.keys()
167
+ )
168
+
169
+
170
+ class CLIPVisionEmbeddings(nn.Module):
171
+ def __init__(self, config: CLIPVisionConfig):
172
+ super().__init__()
173
+ self.config = config
174
+ self.embed_dim = config.hidden_size
175
+ self.image_size = config.image_size
176
+ self.patch_size = config.patch_size
177
+
178
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
179
+
180
+ self.patch_embedding = nn.Conv2d(
181
+ in_channels=config.num_channels,
182
+ out_channels=self.embed_dim,
183
+ kernel_size=self.patch_size,
184
+ stride=self.patch_size,
185
+ bias=False,
186
+ )
187
+
188
+ self.num_patches = (self.image_size // self.patch_size) ** 2
189
+ self.num_positions = self.num_patches + 1
190
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
191
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)))
192
+
193
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
194
+ batch_size = pixel_values.shape[0]
195
+ patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid]
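+ # flatten the patch grid into a sequence: [*, width, grid, grid] -> [*, grid*grid, width]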
196
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
197
+
198
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
199
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
200
+ embeddings = embeddings + self.position_embedding(self.position_ids)
201
+ return embeddings
202
+
203
+
204
+ class CLIPTextEmbeddings(nn.Module):
205
+ def __init__(self, config: CLIPTextConfig):
206
+ super().__init__()
207
+ embed_dim = config.hidden_size
208
+
209
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
210
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
211
+
212
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
213
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
214
+
215
+ def forward(
216
+ self,
217
+ input_ids: Optional[torch.LongTensor] = None,
218
+ position_ids: Optional[torch.LongTensor] = None,
219
+ inputs_embeds: Optional[torch.FloatTensor] = None,
220
+ ) -> torch.Tensor:
221
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
222
+
223
+ if position_ids is None:
224
+ position_ids = self.position_ids[:, :seq_length]
225
+
226
+ if inputs_embeds is None:
227
+ inputs_embeds = self.token_embedding(input_ids)
228
+
229
+ position_embeddings = self.position_embedding(position_ids.long())
230
+ embeddings = inputs_embeds + position_embeddings
231
+
232
+ return embeddings
233
+
234
+
235
+ class CLIPAttention(nn.Module):
236
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
237
+
238
+ def __init__(self, config):
239
+ super().__init__()
240
+ self.config = config
241
+ self.embed_dim = config.hidden_size
242
+ self.num_heads = config.num_attention_heads
243
+ self.head_dim = self.embed_dim // self.num_heads
244
+ if self.head_dim * self.num_heads != self.embed_dim:
245
+ raise ValueError(
246
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
247
+ f" {self.num_heads})."
248
+ )
249
+ self.scale = self.head_dim**-0.5
250
+ self.dropout = config.attention_dropout
251
+
252
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
253
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
254
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
255
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
256
+
257
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
258
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
259
+
260
+ def forward(
261
+ self,
262
+ hidden_states: torch.Tensor,
263
+ attention_mask: Optional[torch.Tensor] = None,
264
+ causal_attention_mask: Optional[torch.Tensor] = None,
265
+ output_attentions: Optional[bool] = False,
266
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
267
+ """Input shape: Batch x Time x Channel"""
268
+
269
+ bsz, tgt_len, embed_dim = hidden_states.size()
270
+
271
+ # get query proj
272
+ query_states = self.q_proj(hidden_states) * self.scale
273
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
274
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
275
+
276
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
277
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
278
+ key_states = key_states.view(*proj_shape)
279
+ value_states = value_states.view(*proj_shape)
280
+
281
+ src_len = key_states.size(1)
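+ # raw attention scores of shape (bsz * num_heads, tgt_len, src_len); the queries were already scaled by head_dim**-0.5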
282
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
283
+
284
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
285
+ raise ValueError(
286
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
287
+ f" {attn_weights.size()}"
288
+ )
289
+
290
+ # apply the causal_attention_mask first
291
+ if causal_attention_mask is not None:
292
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
293
+ raise ValueError(
294
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
295
+ f" {causal_attention_mask.size()}"
296
+ )
297
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
298
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
299
+
300
+ if attention_mask is not None:
301
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
302
+ raise ValueError(
303
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
304
+ )
305
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
306
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
307
+
308
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
309
+
310
+ if output_attentions:
311
+ # this operation is a bit awkward, but it's required to
312
+ # make sure that attn_weights keeps its gradient.
313
+ # In order to do so, attn_weights have to be reshaped
314
+ # twice and have to be reused in the following
315
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
316
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
317
+ else:
318
+ attn_weights_reshaped = None
319
+
320
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
321
+
322
+ attn_output = torch.bmm(attn_probs, value_states)
323
+
324
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
325
+ raise ValueError(
326
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
327
+ f" {attn_output.size()}"
328
+ )
329
+
330
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
331
+ attn_output = attn_output.transpose(1, 2)
332
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
333
+
334
+ attn_output = self.out_proj(attn_output)
335
+
336
+ return attn_output, attn_weights_reshaped
337
+
338
+
339
+ class CLIPMLP(nn.Module):
340
+ def __init__(self, config):
341
+ super().__init__()
342
+ self.config = config
343
+ self.activation_fn = ACT2FN[config.hidden_act]
344
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
345
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
346
+
347
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
348
+ hidden_states = self.fc1(hidden_states)
349
+ hidden_states = self.activation_fn(hidden_states)
350
+ hidden_states = self.fc2(hidden_states)
351
+ return hidden_states
352
+
353
+
354
+ class CLIPEncoderLayer(nn.Module):
355
+ def __init__(self, config: CLIPConfig):
356
+ super().__init__()
357
+ self.embed_dim = config.hidden_size
358
+ self.self_attn = CLIPAttention(config)
359
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
360
+ self.mlp = CLIPMLP(config)
361
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
362
+
363
+ def forward(
364
+ self,
365
+ hidden_states: torch.Tensor,
366
+ attention_mask: torch.Tensor,
367
+ causal_attention_mask: torch.Tensor,
368
+ output_attentions: Optional[bool] = False,
369
+ ) -> Tuple[torch.FloatTensor]:
370
+ """
371
+ Args:
372
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
373
+ attention_mask (`torch.FloatTensor`): attention mask of size
374
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
375
+ causal_attention_mask (`torch.FloatTensor`): causal attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
376
+ output_attentions (`bool`, *optional*):
377
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
378
+ returned tensors for more detail.
379
+ """
380
+ residual = hidden_states
381
+
382
+ hidden_states = self.layer_norm1(hidden_states)
383
+ hidden_states, attn_weights = self.self_attn(
384
+ hidden_states=hidden_states,
385
+ attention_mask=attention_mask,
386
+ causal_attention_mask=causal_attention_mask,
387
+ output_attentions=output_attentions,
388
+ )
389
+ hidden_states = residual + hidden_states
390
+
391
+ residual = hidden_states
392
+ hidden_states = self.layer_norm2(hidden_states)
393
+ hidden_states = self.mlp(hidden_states)
394
+ hidden_states = residual + hidden_states
395
+
396
+ outputs = (hidden_states,)
397
+
398
+ if output_attentions:
399
+ outputs += (attn_weights,)
400
+
401
+ return outputs
402
+
403
+
404
+ class CLIPPreTrainedModel(PreTrainedModel):
405
+ """
406
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
407
+ models.
408
+ """
409
+
410
+ config_class = CLIPConfig
411
+ base_model_prefix = "clip"
412
+ supports_gradient_checkpointing = True
413
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
414
+
415
+ def _init_weights(self, module):
416
+ """Initialize the weights"""
417
+ factor = self.config.initializer_factor
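+ # initializer_factor (1.0 by default) uniformly rescales the std of every initialization below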
418
+ if isinstance(module, CLIPTextEmbeddings):
419
+ module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
420
+ module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
421
+ elif isinstance(module, CLIPVisionEmbeddings):
422
+ factor = self.config.initializer_factor
423
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
424
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
425
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
426
+ elif isinstance(module, CLIPAttention):
427
+ factor = self.config.initializer_factor
428
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
429
+ out_proj_std = (module.embed_dim**-0.5) * factor
430
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
431
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
432
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
433
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
434
+ elif isinstance(module, CLIPMLP):
435
+ factor = self.config.initializer_factor
436
+ in_proj_std = (
437
+ (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
438
+ )
439
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
440
+ nn.init.normal_(module.fc1.weight, std=fc_std)
441
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
442
+ elif isinstance(module, CLIPModel):
443
+ nn.init.normal_(
444
+ module.text_projection.weight,
445
+ std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
446
+ )
447
+ nn.init.normal_(
448
+ module.visual_projection.weight,
449
+ std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
450
+ )
451
+ elif isinstance(module, CLIPVisionModelWithProjection):
452
+ nn.init.normal_(
453
+ module.visual_projection.weight,
454
+ std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
455
+ )
456
+ elif isinstance(module, CLIPTextModelWithProjection):
457
+ nn.init.normal_(
458
+ module.text_projection.weight,
459
+ std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
460
+ )
461
+
462
+ if isinstance(module, nn.LayerNorm):
463
+ module.bias.data.zero_()
464
+ module.weight.data.fill_(1.0)
465
+ if isinstance(module, nn.Linear) and module.bias is not None:
466
+ module.bias.data.zero_()
467
+
468
+ def _set_gradient_checkpointing(self, module, value=False):
469
+ if isinstance(module, CLIPEncoder):
470
+ module.gradient_checkpointing = value
471
+
472
+
473
+ CLIP_START_DOCSTRING = r"""
474
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
475
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
476
+ etc.)
477
+
478
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
479
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
480
+ and behavior.
481
+
482
+ Parameters:
483
+ config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
484
+ Initializing with a config file does not load the weights associated with the model, only the
485
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
486
+ """
487
+
488
+ CLIP_TEXT_INPUTS_DOCSTRING = r"""
489
+ Args:
490
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
491
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
492
+ it.
493
+
494
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
495
+ [`PreTrainedTokenizer.__call__`] for details.
496
+
497
+ [What are input IDs?](../glossary#input-ids)
498
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
499
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
500
+
501
+ - 1 for tokens that are **not masked**,
502
+ - 0 for tokens that are **masked**.
503
+
504
+ [What are attention masks?](../glossary#attention-mask)
505
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
506
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
507
+ config.max_position_embeddings - 1]`.
508
+
509
+ [What are position IDs?](../glossary#position-ids)
510
+ output_attentions (`bool`, *optional*):
511
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
512
+ tensors for more detail.
513
+ output_hidden_states (`bool`, *optional*):
514
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
515
+ more detail.
516
+ return_dict (`bool`, *optional*):
517
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
518
+ """
519
+
520
+ CLIP_VISION_INPUTS_DOCSTRING = r"""
521
+ Args:
522
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
523
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
524
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
525
+ output_attentions (`bool`, *optional*):
526
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
527
+ tensors for more detail.
528
+ output_hidden_states (`bool`, *optional*):
529
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
530
+ more detail.
531
+ return_dict (`bool`, *optional*):
532
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
533
+ """
534
+
535
+ CLIP_INPUTS_DOCSTRING = r"""
536
+ Args:
537
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
538
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
539
+ it.
540
+
541
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
542
+ [`PreTrainedTokenizer.__call__`] for details.
543
+
544
+ [What are input IDs?](../glossary#input-ids)
545
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
546
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
547
+
548
+ - 1 for tokens that are **not masked**,
549
+ - 0 for tokens that are **masked**.
550
+
551
+ [What are attention masks?](../glossary#attention-mask)
552
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
553
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
554
+ config.max_position_embeddings - 1]`.
555
+
556
+ [What are position IDs?](../glossary#position-ids)
557
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
558
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
559
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
560
+ return_loss (`bool`, *optional*):
561
+ Whether or not to return the contrastive loss.
562
+ output_attentions (`bool`, *optional*):
563
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
564
+ tensors for more detail.
565
+ output_hidden_states (`bool`, *optional*):
566
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
567
+ more detail.
568
+ return_dict (`bool`, *optional*):
569
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
570
+ """
571
+
572
+
573
+ class CLIPEncoder(nn.Module):
574
+ """
575
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
576
+ [`CLIPEncoderLayer`].
577
+
578
+ Args:
579
+ config: CLIPConfig
580
+ """
581
+
582
+ def __init__(self, config: CLIPConfig):
583
+ super().__init__()
584
+ self.config = config
585
+ self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
586
+ self.gradient_checkpointing = False
587
+
588
+ def forward(
589
+ self,
590
+ inputs_embeds,
591
+ attention_mask: Optional[torch.Tensor] = None,
592
+ causal_attention_mask: Optional[torch.Tensor] = None,
593
+ output_attentions: Optional[bool] = None,
594
+ output_hidden_states: Optional[bool] = None,
595
+ return_dict: Optional[bool] = None,
596
+ ) -> Union[Tuple, BaseModelOutput]:
597
+ r"""
598
+ Args:
599
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
600
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
601
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
602
+ than the model's internal embedding lookup matrix.
603
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
604
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
605
+
606
+ - 1 for tokens that are **not masked**,
607
+ - 0 for tokens that are **masked**.
608
+
609
+ [What are attention masks?](../glossary#attention-mask)
610
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
611
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
612
+
613
+ - 1 for tokens that are **not masked**,
614
+ - 0 for tokens that are **masked**.
615
+
616
+ [What are attention masks?](../glossary#attention-mask)
617
+ output_attentions (`bool`, *optional*):
618
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
619
+ returned tensors for more detail.
620
+ output_hidden_states (`bool`, *optional*):
621
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
622
+ for more detail.
623
+ return_dict (`bool`, *optional*):
624
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
625
+ """
626
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
627
+ output_hidden_states = (
628
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
629
+ )
630
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
631
+
632
+ encoder_states = () if output_hidden_states else None
633
+ all_attentions = () if output_attentions else None
634
+
635
+ hidden_states = inputs_embeds
636
+ for idx, encoder_layer in enumerate(self.layers):
637
+ if output_hidden_states:
638
+ encoder_states = encoder_states + (hidden_states,)
639
+ if self.gradient_checkpointing and self.training:
640
+
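+ # wrap the layer so the non-tensor output_attentions flag is captured by closure; torch.utils.checkpoint re-runs this wrapper with only the tensor inputs during the backward pass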
641
+ def create_custom_forward(module):
642
+ def custom_forward(*inputs):
643
+ return module(*inputs, output_attentions)
644
+
645
+ return custom_forward
646
+
647
+ layer_outputs = torch.utils.checkpoint.checkpoint(
648
+ create_custom_forward(encoder_layer),
649
+ hidden_states,
650
+ attention_mask,
651
+ causal_attention_mask,
652
+ )
653
+ else:
654
+ layer_outputs = encoder_layer(
655
+ hidden_states,
656
+ attention_mask,
657
+ causal_attention_mask,
658
+ output_attentions=output_attentions,
659
+ )
660
+
661
+ hidden_states = layer_outputs[0]
662
+
663
+ if output_attentions:
664
+ all_attentions = all_attentions + (layer_outputs[1],)
665
+
666
+ if output_hidden_states:
667
+ encoder_states = encoder_states + (hidden_states,)
668
+
669
+ if not return_dict:
670
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
671
+ return BaseModelOutput(
672
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
673
+ )
674
+
675
+
676
+ class CLIPTextTransformer(nn.Module):
677
+ def __init__(self, config: CLIPTextConfig):
678
+ super().__init__()
679
+ self.config = config
680
+ embed_dim = config.hidden_size
681
+ self.embeddings = CLIPTextEmbeddings(config)
682
+ self.encoder = CLIPEncoder(config)
683
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
684
+
685
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
686
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
687
+ def forward(
688
+ self,
689
+ input_ids: Optional[torch.Tensor] = None,
690
+ attention_mask: Optional[torch.Tensor] = None,
691
+ position_ids: Optional[torch.Tensor] = None,
692
+ output_attentions: Optional[bool] = None,
693
+ output_hidden_states: Optional[bool] = None,
694
+ return_dict: Optional[bool] = None,
695
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
696
+ r"""
697
+ Returns:
698
+
699
+ """
700
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
701
+ output_hidden_states = (
702
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
703
+ )
704
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
705
+
706
+ if input_ids is None:
707
+ raise ValueError("You have to specify input_ids")
708
+
709
+ input_shape = input_ids.size()
710
+ input_ids = input_ids.view(-1, input_shape[-1])
711
+
712
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
713
+
714
+ bsz, seq_len = input_shape
715
+ # CLIP's text model uses causal mask, prepare it here.
716
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
717
+ causal_attention_mask = self._build_causal_attention_mask(
718
+ bsz, seq_len, hidden_states.dtype, device=hidden_states.device
719
+ )
720
+ # expand attention_mask
721
+ if attention_mask is not None:
722
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
723
+ attention_mask = _expand_mask(attention_mask, hidden_states.dtype)
724
+
725
+ encoder_outputs = self.encoder(
726
+ inputs_embeds=hidden_states,
727
+ attention_mask=attention_mask,
728
+ causal_attention_mask=causal_attention_mask,
729
+ output_attentions=output_attentions,
730
+ output_hidden_states=output_hidden_states,
731
+ return_dict=return_dict,
732
+ )
733
+
734
+ last_hidden_state = encoder_outputs[0]
735
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
736
+
737
+ # text_embeds.shape = [batch_size, sequence_length, transformer.width]
738
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
739
+ # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
740
+ pooled_output = last_hidden_state[
741
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
742
+ input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
743
+ ]
744
+
745
+ if not return_dict:
746
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
747
+
748
+ return BaseModelOutputWithPooling(
749
+ last_hidden_state=last_hidden_state,
750
+ pooler_output=pooled_output,
751
+ hidden_states=encoder_outputs.hidden_states,
752
+ attentions=encoder_outputs.attentions,
753
+ )
754
+
755
+ def _build_causal_attention_mask(self, bsz, seq_len, dtype, device=None):
756
+ # lazily create the causal attention mask for the text tokens (each token attends only to itself and earlier tokens)
757
+ # pytorch uses additive attention mask; fill with -inf
758
+ mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype, device=device)
759
+ mask.fill_(torch.finfo(dtype).min)
760
+ mask.triu_(1) # zero out the diagonal and below, keeping the large negative values above the diagonal
761
+ mask = mask.unsqueeze(1) # expand mask
762
+ return mask
763
+
764
+
765
+ @add_start_docstrings(
766
+ """The text model from CLIP without any head or projection on top.""",
767
+ CLIP_START_DOCSTRING,
768
+ )
769
+ class CLIPTextModel(CLIPPreTrainedModel):
770
+ config_class = CLIPTextConfig
771
+
772
+ _no_split_modules = ["CLIPEncoderLayer"]
773
+
774
+ def __init__(self, config: CLIPTextConfig):
775
+ super().__init__(config)
776
+ self.text_model = CLIPTextTransformer(config)
777
+ # Initialize weights and apply final processing
778
+ self.post_init()
779
+
780
+ def get_input_embeddings(self) -> nn.Module:
781
+ return self.text_model.embeddings.token_embedding
782
+
783
+ def set_input_embeddings(self, value):
784
+ self.text_model.embeddings.token_embedding = value
785
+
786
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
787
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
788
+ def forward(
789
+ self,
790
+ input_ids: Optional[torch.Tensor] = None,
791
+ attention_mask: Optional[torch.Tensor] = None,
792
+ position_ids: Optional[torch.Tensor] = None,
793
+ output_attentions: Optional[bool] = None,
794
+ output_hidden_states: Optional[bool] = None,
795
+ return_dict: Optional[bool] = None,
796
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
797
+ r"""
798
+ Returns:
799
+
800
+ Examples:
801
+
802
+ ```python
803
+ >>> from transformers import AutoTokenizer, CLIPTextModel
804
+
805
+ >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
806
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
807
+
808
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
809
+
810
+ >>> outputs = model(**inputs)
811
+ >>> last_hidden_state = outputs.last_hidden_state
812
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
813
+ ```"""
814
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
815
+
816
+ return self.text_model(
817
+ input_ids=input_ids,
818
+ attention_mask=attention_mask,
819
+ position_ids=position_ids,
820
+ output_attentions=output_attentions,
821
+ output_hidden_states=output_hidden_states,
822
+ return_dict=return_dict,
823
+ )
824
+
825
+
826
+ class CLIPVisionTransformer(nn.Module):
827
+ def __init__(self, config: CLIPVisionConfig):
828
+ super().__init__()
829
+ self.config = config
830
+ embed_dim = config.hidden_size
831
+
832
+ self.embeddings = CLIPVisionEmbeddings(config)
833
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
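+ # note: the misspelled attribute name "pre_layrnorm" is intentional; renaming it would break loading of released CLIP checkpoints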
834
+ self.encoder = CLIPEncoder(config)
835
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
836
+
837
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
838
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
839
+ def forward(
840
+ self,
841
+ pixel_values: Optional[torch.FloatTensor] = None,
842
+ output_attentions: Optional[bool] = None,
843
+ output_hidden_states: Optional[bool] = None,
844
+ return_dict: Optional[bool] = None,
845
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
846
+ r"""
847
+ Returns:
848
+
849
+ """
850
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
851
+ output_hidden_states = (
852
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
853
+ )
854
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
855
+
856
+ if pixel_values is None:
857
+ raise ValueError("You have to specify pixel_values")
858
+
859
+ hidden_states = self.embeddings(pixel_values)
860
+ hidden_states = self.pre_layrnorm(hidden_states)
861
+
862
+ encoder_outputs = self.encoder(
863
+ inputs_embeds=hidden_states,
864
+ output_attentions=output_attentions,
865
+ output_hidden_states=output_hidden_states,
866
+ return_dict=return_dict,
867
+ )
868
+
869
+ last_hidden_state = encoder_outputs[0]
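+ # the vision pooler takes the class-token embedding (position 0) and layer-norms it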
870
+ pooled_output = last_hidden_state[:, 0, :]
871
+ pooled_output = self.post_layernorm(pooled_output)
872
+
873
+ if not return_dict:
874
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
875
+
876
+ return BaseModelOutputWithPooling(
877
+ last_hidden_state=last_hidden_state,
878
+ pooler_output=pooled_output,
879
+ hidden_states=encoder_outputs.hidden_states,
880
+ attentions=encoder_outputs.attentions,
881
+ )
882
+
883
+
884
+ @add_start_docstrings(
885
+ """The vision model from CLIP without any head or projection on top.""",
886
+ CLIP_START_DOCSTRING,
887
+ )
888
+ class CLIPVisionModel(CLIPPreTrainedModel):
889
+ config_class = CLIPVisionConfig
890
+ main_input_name = "pixel_values"
891
+
892
+ def __init__(self, config: CLIPVisionConfig):
893
+ super().__init__(config)
894
+ self.vision_model = CLIPVisionTransformer(config)
895
+ # Initialize weights and apply final processing
896
+ self.post_init()
897
+
898
+ def get_input_embeddings(self) -> nn.Module:
899
+ return self.vision_model.embeddings.patch_embedding
900
+
901
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
902
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
903
+ def forward(
904
+ self,
905
+ pixel_values: Optional[torch.FloatTensor] = None,
906
+ output_attentions: Optional[bool] = None,
907
+ output_hidden_states: Optional[bool] = None,
908
+ return_dict: Optional[bool] = None,
909
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
910
+ r"""
911
+ Returns:
912
+
913
+ Examples:
914
+
915
+ ```python
916
+ >>> from PIL import Image
917
+ >>> import requests
918
+ >>> from transformers import AutoProcessor, CLIPVisionModel
919
+
920
+ >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
921
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
922
+
923
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
924
+ >>> image = Image.open(requests.get(url, stream=True).raw)
925
+
926
+ >>> inputs = processor(images=image, return_tensors="pt")
927
+
928
+ >>> outputs = model(**inputs)
929
+ >>> last_hidden_state = outputs.last_hidden_state
930
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
931
+ ```"""
932
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
933
+
934
+ return self.vision_model(
935
+ pixel_values=pixel_values,
936
+ output_attentions=output_attentions,
937
+ output_hidden_states=output_hidden_states,
938
+ return_dict=return_dict,
939
+ )
940
+
941
+
942
+ @add_start_docstrings(CLIP_START_DOCSTRING)
943
+ class CLIPModel(CLIPPreTrainedModel):
944
+ config_class = CLIPConfig
945
+
946
+ def __init__(self, config: CLIPConfig):
947
+ super().__init__(config)
948
+
949
+ if not isinstance(config.text_config, CLIPTextConfig):
950
+ raise ValueError(
951
+ "config.text_config is expected to be of type CLIPTextConfig but is of type"
952
+ f" {type(config.text_config)}."
953
+ )
954
+
955
+ if not isinstance(config.vision_config, CLIPVisionConfig):
956
+ raise ValueError(
957
+ "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
958
+ f" {type(config.vision_config)}."
959
+ )
960
+
961
+ text_config = config.text_config
962
+ vision_config = config.vision_config
963
+
964
+ self.projection_dim = config.projection_dim
965
+ self.text_embed_dim = text_config.hidden_size
966
+ self.vision_embed_dim = vision_config.hidden_size
967
+
968
+ self.text_model = CLIPTextTransformer(text_config)
969
+ self.vision_model = CLIPVisionTransformer(vision_config)
970
+
971
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
972
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
973
+ self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value)
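+ # learnable temperature for the contrastive logits, stored in log space and exponentiated in forward()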
974
+
975
+ # Initialize weights and apply final processing
976
+ self.post_init()
977
+
978
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
979
+ def get_text_features(
980
+ self,
981
+ input_ids: Optional[torch.Tensor] = None,
982
+ attention_mask: Optional[torch.Tensor] = None,
983
+ position_ids: Optional[torch.Tensor] = None,
984
+ output_attentions: Optional[bool] = None,
985
+ output_hidden_states: Optional[bool] = None,
986
+ return_dict: Optional[bool] = None,
987
+ ) -> torch.FloatTensor:
988
+ r"""
989
+ Returns:
990
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
991
+ applying the projection layer to the pooled output of [`CLIPTextModel`].
992
+
993
+ Examples:
994
+
995
+ ```python
996
+ >>> from transformers import AutoTokenizer, CLIPModel
997
+
998
+ >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
999
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
1000
+
1001
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1002
+ >>> text_features = model.get_text_features(**inputs)
1003
+ ```"""
1004
+ # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
1005
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1006
+ output_hidden_states = (
1007
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1008
+ )
1009
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1010
+
1011
+ text_outputs = self.text_model(
1012
+ input_ids=input_ids,
1013
+ attention_mask=attention_mask,
1014
+ position_ids=position_ids,
1015
+ output_attentions=output_attentions,
1016
+ output_hidden_states=output_hidden_states,
1017
+ return_dict=return_dict,
1018
+ )
1019
+
1020
+ pooled_output = text_outputs[1]
1021
+ text_features = self.text_projection(pooled_output)
1022
+
1023
+ return text_features
1024
+
1025
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
1026
+ def get_image_features(
1027
+ self,
1028
+ pixel_values: Optional[torch.FloatTensor] = None,
1029
+ output_attentions: Optional[bool] = None,
1030
+ output_hidden_states: Optional[bool] = None,
1031
+ return_dict: Optional[bool] = None,
1032
+ ) -> torch.FloatTensor:
1033
+ r"""
1034
+ Returns:
1035
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1036
+ applying the projection layer to the pooled output of [`CLIPVisionModel`].
1037
+
1038
+ Examples:
1039
+
1040
+ ```python
1041
+ >>> from PIL import Image
1042
+ >>> import requests
1043
+ >>> from transformers import AutoProcessor, CLIPModel
1044
+
1045
+ >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
1046
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1047
+
1048
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1049
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1050
+
1051
+ >>> inputs = processor(images=image, return_tensors="pt")
1052
+
1053
+ >>> image_features = model.get_image_features(**inputs)
1054
+ ```"""
1055
+ # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
1056
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1057
+ output_hidden_states = (
1058
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1059
+ )
1060
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1061
+
1062
+ vision_outputs = self.vision_model(
1063
+ pixel_values=pixel_values,
1064
+ output_attentions=output_attentions,
1065
+ output_hidden_states=output_hidden_states,
1066
+ return_dict=return_dict,
1067
+ )
1068
+
1069
+ pooled_output = vision_outputs[1] # pooled_output
1070
+ image_features = self.visual_projection(pooled_output)
1071
+
1072
+ return image_features
1073
+
1074
+ @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
1075
+ @replace_return_docstrings(output_type=CLIPOutput, config_class=CLIPConfig)
1076
+ def forward(
1077
+ self,
1078
+ input_ids: Optional[torch.LongTensor] = None,
1079
+ pixel_values: Optional[torch.FloatTensor] = None,
1080
+ attention_mask: Optional[torch.Tensor] = None,
1081
+ position_ids: Optional[torch.LongTensor] = None,
1082
+ return_loss: Optional[bool] = None,
1083
+ output_attentions: Optional[bool] = None,
1084
+ output_hidden_states: Optional[bool] = None,
1085
+ return_dict: Optional[bool] = None,
1086
+ ) -> Union[Tuple, CLIPOutput]:
1087
+ r"""
1088
+ Returns:
1089
+
1090
+ Examples:
1091
+
1092
+ ```python
1093
+ >>> from PIL import Image
1094
+ >>> import requests
1095
+ >>> from transformers import AutoProcessor, CLIPModel
1096
+
1097
+ >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
1098
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1099
+
1100
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1101
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1102
+
1103
+ >>> inputs = processor(
1104
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
1105
+ ... )
1106
+
1107
+ >>> outputs = model(**inputs)
1108
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1109
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1110
+ ```"""
1111
+ # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
1112
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1113
+ output_hidden_states = (
1114
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1115
+ )
1116
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1117
+
1118
+ vision_outputs = self.vision_model(
1119
+ pixel_values=pixel_values,
1120
+ output_attentions=output_attentions,
1121
+ output_hidden_states=output_hidden_states,
1122
+ return_dict=return_dict,
1123
+ )
1124
+
1125
+ text_outputs = self.text_model(
1126
+ input_ids=input_ids,
1127
+ attention_mask=attention_mask,
1128
+ position_ids=position_ids,
1129
+ output_attentions=output_attentions,
1130
+ output_hidden_states=output_hidden_states,
1131
+ return_dict=return_dict,
1132
+ )
1133
+
1134
+ image_embeds = vision_outputs[1]
1135
+ image_embeds = self.visual_projection(image_embeds)
1136
+
1137
+ text_embeds = text_outputs[1]
1138
+ text_embeds = self.text_projection(text_embeds)
1139
+
1140
+ # normalized features
1141
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1142
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1143
+
1144
+ # cosine similarity as logits
1145
+ logit_scale = self.logit_scale.exp()
1146
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
1147
+ logits_per_image = logits_per_text.t()
1148
+
1149
+ loss = None
1150
+ if return_loss:
1151
+ loss = clip_loss(logits_per_text)
1152
+
1153
+ if not return_dict:
1154
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1155
+ return ((loss,) + output) if loss is not None else output
1156
+
1157
+ return CLIPOutput(
1158
+ loss=loss,
1159
+ logits_per_image=logits_per_image,
1160
+ logits_per_text=logits_per_text,
1161
+ text_embeds=text_embeds,
1162
+ image_embeds=image_embeds,
1163
+ text_model_output=text_outputs,
1164
+ vision_model_output=vision_outputs,
1165
+ )
1166
+
1167
+
1168
+ @add_start_docstrings(
1169
+ """
1170
+ CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output).
1171
+ """,
1172
+ CLIP_START_DOCSTRING,
1173
+ )
1174
+ class CLIPTextModelWithProjection(CLIPPreTrainedModel):
1175
+ config_class = CLIPTextConfig
1176
+
1177
+ _no_split_modules = ["CLIPEncoderLayer"]
1178
+
1179
+ def __init__(self, config: CLIPTextConfig):
1180
+ super().__init__(config)
1181
+
1182
+ self.text_model = CLIPTextTransformer(config)
1183
+
1184
+ self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
1185
+
1186
+ # Initialize weights and apply final processing
1187
+ self.post_init()
1188
+
1189
+ def get_input_embeddings(self) -> nn.Module:
1190
+ return self.text_model.embeddings.token_embedding
1191
+
1192
+ def set_input_embeddings(self, value):
1193
+ self.text_model.embeddings.token_embedding = value
1194
+
1195
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
1196
+ @replace_return_docstrings(output_type=CLIPTextModelOutput, config_class=CLIPTextConfig)
1197
+ def forward(
1198
+ self,
1199
+ input_ids: Optional[torch.Tensor] = None,
1200
+ attention_mask: Optional[torch.Tensor] = None,
1201
+ position_ids: Optional[torch.Tensor] = None,
1202
+ output_attentions: Optional[bool] = None,
1203
+ output_hidden_states: Optional[bool] = None,
1204
+ return_dict: Optional[bool] = None,
1205
+ ) -> Union[Tuple, CLIPTextModelOutput]:
1206
+ r"""
1207
+ Returns:
1208
+
1209
+ Examples:
1210
+
1211
+ ```python
1212
+ >>> from transformers import AutoTokenizer, CLIPTextModelWithProjection
1213
+
1214
+ >>> model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
1215
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
1216
+
1217
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1218
+
1219
+ >>> outputs = model(**inputs)
1220
+ >>> text_embeds = outputs.text_embeds
1221
+ ```"""
1222
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1223
+
1224
+ text_outputs = self.text_model(
1225
+ input_ids=input_ids,
1226
+ attention_mask=attention_mask,
1227
+ position_ids=position_ids,
1228
+ output_attentions=output_attentions,
1229
+ output_hidden_states=output_hidden_states,
1230
+ return_dict=return_dict,
1231
+ )
1232
+
1233
+ pooled_output = text_outputs[1]
1234
+
1235
+ text_embeds = self.text_projection(pooled_output)
1236
+
1237
+ if not return_dict:
1238
+ outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]
1239
+ return tuple(output for output in outputs if output is not None)
1240
+
1241
+ return CLIPTextModelOutput(
1242
+ text_embeds=text_embeds,
1243
+ last_hidden_state=text_outputs.last_hidden_state,
1244
+ hidden_states=text_outputs.hidden_states,
1245
+ attentions=text_outputs.attentions,
1246
+ )
1247
+
1248
+
1249
+ @add_start_docstrings(
1250
+ """
1251
+ CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output).
1252
+ """,
1253
+ CLIP_START_DOCSTRING,
1254
+ )
1255
+ class CLIPVisionModelWithProjection(CLIPPreTrainedModel):
1256
+ config_class = CLIPVisionConfig
1257
+ main_input_name = "pixel_values"
1258
+
1259
+ def __init__(self, config: CLIPVisionConfig):
1260
+ super().__init__(config)
1261
+
1262
+ self.vision_model = CLIPVisionTransformer(config)
1263
+
1264
+ self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
1265
+
1266
+ # Initialize weights and apply final processing
1267
+ self.post_init()
1268
+
1269
+ def get_input_embeddings(self) -> nn.Module:
1270
+ return self.vision_model.embeddings.patch_embedding
1271
+
1272
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
1273
+ @replace_return_docstrings(output_type=CLIPVisionModelOutput, config_class=CLIPVisionConfig)
1274
+ def forward(
1275
+ self,
1276
+ pixel_values: Optional[torch.FloatTensor] = None,
1277
+ output_attentions: Optional[bool] = None,
1278
+ output_hidden_states: Optional[bool] = None,
1279
+ return_dict: Optional[bool] = None,
1280
+ ) -> Union[Tuple, CLIPVisionModelOutput]:
1281
+ r"""
1282
+ Returns:
1283
+
1284
+ Examples:
1285
+
1286
+ ```python
1287
+ >>> from PIL import Image
1288
+ >>> import requests
1289
+ >>> from transformers import AutoProcessor, CLIPVisionModelWithProjection
1290
+
1291
+ >>> model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
1292
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1293
+
1294
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1295
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1296
+
1297
+ >>> inputs = processor(images=image, return_tensors="pt")
1298
+
1299
+ >>> outputs = model(**inputs)
1300
+ >>> image_embeds = outputs.image_embeds
1301
+ ```"""
1302
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1303
+
1304
+ vision_outputs = self.vision_model(
1305
+ pixel_values=pixel_values,
1306
+ output_attentions=output_attentions,
1307
+ output_hidden_states=output_hidden_states,
1308
+ return_dict=return_dict,
1309
+ )
1310
+
1311
+ pooled_output = vision_outputs[1] # pooled_output
1312
+
1313
+ image_embeds = self.visual_projection(pooled_output)
1314
+
1315
+ if not return_dict:
1316
+ outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:]
1317
+ return tuple(output for output in outputs if output is not None)
1318
+
1319
+ return CLIPVisionModelOutput(
1320
+ image_embeds=image_embeds,
1321
+ last_hidden_state=vision_outputs.last_hidden_state,
1322
+ hidden_states=vision_outputs.hidden_states,
1323
+ attentions=vision_outputs.attentions,
1324
+ )