Andro0s committed
Commit d3efb50 · verified · 1 Parent(s): 5033d8e

Upload 5 files
model/open_clip/__init__ (1).py ADDED
@@ -0,0 +1,4 @@
+from .model import CLIP
+from .tokenizer import tokenize
+
+__all__ = ["CLIP", "tokenize"]
model/open_clip/bpe_simple_vocab_16e6.txt.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+size 1356917
model/open_clip/model.py ADDED
@@ -0,0 +1,206 @@
+""" CLIP Model
+
+Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
+"""
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+from .transformer import LayerNormFp32, LayerNorm, QuickGELU, VisionTransformer, TextTransformer
+
+
+@dataclass
+class CLIPVisionCfg:
+    layers: Union[Tuple[int, int, int, int], int] = 12
+    width: int = 768
+    head_width: int = 64
+    mlp_ratio: float = 4.0
+    patch_size: int = 16
+    image_size: Union[Tuple[int, int], int] = 224
+
+    ls_init_value: Optional[float] = None  # layer scale initial value
+    patch_dropout: float = 0.  # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
+    input_patchnorm: bool = False  # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design
+    global_average_pool: bool = False  # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
+    attentional_pool: bool = False  # whether to use attentional pooler in the last embedding layer
+    n_queries: int = 256  # n_queries for attentional pooler
+    attn_pooler_heads: int = 8  # n heads for attentional_pooling
+    output_tokens: bool = False
+
+    timm_model_name: str = None  # a valid model name overrides layers, width, patch_size
+    timm_model_pretrained: bool = False  # use (imagenet) pretrained weights for named model
+    timm_pool: str = 'avg'  # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
+    timm_proj: str = 'linear'  # linear projection for timm model output ('linear', 'mlp', '')
+    timm_proj_bias: bool = False  # enable bias final projection
+    timm_drop: float = 0.  # head dropout
+    timm_drop_path: Optional[float] = None  # backbone stochastic depth
+
+
+@dataclass
+class CLIPTextCfg:
+    context_length: int = 77
+    vocab_size: int = 49408
+    width: int = 512
+    heads: int = 8
+    layers: int = 12
+    ls_init_value: Optional[float] = None  # layer scale initial value
+    hf_model_name: str = None
+    hf_tokenizer_name: str = None
+    hf_model_pretrained: bool = True
+    proj: str = 'mlp'
+    pooler_type: str = 'mean_pooler'
+    embed_cls: bool = False
+    pad_id: int = 0
+    output_tokens: bool = False
+
+
+def get_cast_dtype(precision: str):
+    cast_dtype = None
+    if precision == 'bf16':
+        cast_dtype = torch.bfloat16
+    elif precision == 'fp16':
+        cast_dtype = torch.float16
+    return cast_dtype
+
+
+def _build_vision_tower(
+        embed_dim: int,
+        vision_cfg: CLIPVisionCfg,
+        quick_gelu: bool = False,
+        cast_dtype: Optional[torch.dtype] = None
+):
+    if isinstance(vision_cfg, dict):
+        vision_cfg = CLIPVisionCfg(**vision_cfg)
+
+    # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
+    # memory efficient in recent PyTorch releases (>= 1.10).
+    # NOTE: timm models always use native GELU regardless of quick_gelu flag.
+    act_layer = QuickGELU if quick_gelu else nn.GELU
+
+    vision_heads = vision_cfg.width // vision_cfg.head_width
+    norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
+    visual = VisionTransformer(
+        image_size=vision_cfg.image_size,
+        patch_size=vision_cfg.patch_size,
+        width=vision_cfg.width,
+        layers=vision_cfg.layers,
+        heads=vision_heads,
+        mlp_ratio=vision_cfg.mlp_ratio,
+        ls_init_value=vision_cfg.ls_init_value,
+        patch_dropout=vision_cfg.patch_dropout,
+        input_patchnorm=vision_cfg.input_patchnorm,
+        global_average_pool=vision_cfg.global_average_pool,
+        attentional_pool=vision_cfg.attentional_pool,
+        n_queries=vision_cfg.n_queries,
+        attn_pooler_heads=vision_cfg.attn_pooler_heads,
+        output_tokens=vision_cfg.output_tokens,
+        output_dim=embed_dim,
+        act_layer=act_layer,
+        norm_layer=norm_layer,
+    )
+
+    return visual
+
+
+def _build_text_tower(
+        embed_dim: int,
+        text_cfg: CLIPTextCfg,
+        quick_gelu: bool = False,
+        cast_dtype: Optional[torch.dtype] = None,
+):
+    if isinstance(text_cfg, dict):
+        text_cfg = CLIPTextCfg(**text_cfg)
+
+    act_layer = QuickGELU if quick_gelu else nn.GELU
+    norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
+
+    text = TextTransformer(
+        context_length=text_cfg.context_length,
+        vocab_size=text_cfg.vocab_size,
+        width=text_cfg.width,
+        heads=text_cfg.heads,
+        layers=text_cfg.layers,
+        ls_init_value=text_cfg.ls_init_value,
+        output_dim=embed_dim,
+        embed_cls=text_cfg.embed_cls,
+        output_tokens=text_cfg.output_tokens,
+        pad_id=text_cfg.pad_id,
+        act_layer=act_layer,
+        norm_layer=norm_layer,
+    )
+    return text
+
+
+class CLIP(nn.Module):
+    output_dict: torch.jit.Final[bool]
+
+    def __init__(
+            self,
+            embed_dim: int,
+            vision_cfg: CLIPVisionCfg,
+            text_cfg: CLIPTextCfg,
+            quick_gelu: bool = False,
+            cast_dtype: Optional[torch.dtype] = None,
+            output_dict: bool = False,
+    ):
+        super().__init__()
+        self.output_dict = output_dict
+        self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
+
+        text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
+        self.transformer = text.transformer
+        self.context_length = text.context_length
+        self.vocab_size = text.vocab_size
+        self.token_embedding = text.token_embedding
+        self.positional_embedding = text.positional_embedding
+        self.ln_final = text.ln_final
+        self.text_projection = text.text_projection
+        self.register_buffer('attn_mask', text.attn_mask, persistent=False)
+
+        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
+
+    def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
+        # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
+        self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        self.visual.set_grad_checkpointing(enable)
+        self.transformer.grad_checkpointing = enable
+
+    def encode_image(self, image, normalize: bool = False):
+        features = self.visual(image)
+        return F.normalize(features, dim=-1) if normalize else features
+
+    def encode_text(self, text, normalize: bool = False):
+        cast_dtype = self.transformer.get_cast_dtype()
+
+        x = self.token_embedding(text).to(cast_dtype)  # [batch_size, n_ctx, d_model]
+
+        x = x + self.positional_embedding.to(cast_dtype)
+        x = x.permute(1, 0, 2)  # NLD -> LND
+        x = self.transformer(x, attn_mask=self.attn_mask)
+        x = x.permute(1, 0, 2)  # LND -> NLD
+        x = self.ln_final(x)  # [batch_size, n_ctx, transformer.width]
+        # take features from the eot embedding (eot_token is the highest number in each sequence)
+        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
+        return F.normalize(x, dim=-1) if normalize else x
+
+    def forward(
+            self,
+            image: Optional[torch.Tensor] = None,
+            text: Optional[torch.Tensor] = None,
+    ):
+        image_features = self.encode_image(image, normalize=True) if image is not None else None
+        text_features = self.encode_text(text, normalize=True) if text is not None else None
+        if self.output_dict:
+            return {
+                "image_features": image_features,
+                "text_features": text_features,
+                "logit_scale": self.logit_scale.exp()
+            }
+        return image_features, text_features, self.logit_scale.exp()
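A minimal usage sketch for the file above (not part of the uploaded files): it builds a tiny `CLIP` from dict configs (which `_build_vision_tower`/`_build_text_tower` accept) and runs a forward pass. The package path `model.open_clip` and the small dimensions are assumptions for illustration only.

```python
# Hypothetical sketch: tiny CLIP with random weights, assuming the upload
# is importable as the package `model.open_clip`.
import torch
from model.open_clip.model import CLIP

vision_cfg = dict(layers=2, width=64, head_width=32, patch_size=16, image_size=64)
text_cfg = dict(layers=2, width=64, heads=4, context_length=77, vocab_size=49408)
clip = CLIP(embed_dim=32, vision_cfg=vision_cfg, text_cfg=text_cfg)

images = torch.randn(2, 3, 64, 64)                           # dummy image batch
texts = torch.randint(1, 49408, (2, 77), dtype=torch.long)   # dummy token ids
image_features, text_features, logit_scale = clip(images, texts)
print(image_features.shape, text_features.shape)  # torch.Size([2, 32]) torch.Size([2, 32])
```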
model/open_clip/tokenizer.py ADDED
@@ -0,0 +1,214 @@
+""" CLIP tokenizer
+
+Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
+"""
+import gzip
+import html
+import os
+from functools import lru_cache
+from typing import Union, List
+
+import ftfy
+import regex as re
+import torch
+
+# https://stackoverflow.com/q/62691279
+import os
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+
+@lru_cache()
+def default_bpe():
+    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
+
+
+@lru_cache()
+def bytes_to_unicode():
+    """
+    Returns list of utf-8 byte and a corresponding list of unicode strings.
+    The reversible bpe codes work on unicode strings.
+    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+    This is a significant percentage of your normal, say, 32K bpe vocab.
+    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+    And avoids mapping to whitespace/control characters the bpe code barfs on.
+    """
+    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
+    cs = bs[:]
+    n = 0
+    for b in range(2**8):
+        if b not in bs:
+            bs.append(b)
+            cs.append(2**8+n)
+            n += 1
+    cs = [chr(n) for n in cs]
+    return dict(zip(bs, cs))
+
+
+def get_pairs(word):
+    """Return set of symbol pairs in a word.
+    Word is represented as tuple of symbols (symbols being variable-length strings).
+    """
+    pairs = set()
+    prev_char = word[0]
+    for char in word[1:]:
+        pairs.add((prev_char, char))
+        prev_char = char
+    return pairs
+
+
+def basic_clean(text):
+    text = ftfy.fix_text(text)
+    text = html.unescape(html.unescape(text))
+    return text.strip()
+
+
+def whitespace_clean(text):
+    text = re.sub(r'\s+', ' ', text)
+    text = text.strip()
+    return text
+
+
+class SimpleTokenizer(object):
+    def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
+        self.byte_encoder = bytes_to_unicode()
+        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
+        merges = merges[1:49152-256-2+1]
+        merges = [tuple(merge.split()) for merge in merges]
+        vocab = list(bytes_to_unicode().values())
+        vocab = vocab + [v+'</w>' for v in vocab]
+        for merge in merges:
+            vocab.append(''.join(merge))
+        if not special_tokens:
+            special_tokens = ['<start_of_text>', '<end_of_text>']
+        else:
+            special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
+        vocab.extend(special_tokens)
+        self.encoder = dict(zip(vocab, range(len(vocab))))
+        self.decoder = {v: k for k, v in self.encoder.items()}
+        self.bpe_ranks = dict(zip(merges, range(len(merges))))
+        self.cache = {t:t for t in special_tokens}
+        special = "|".join(special_tokens)
+        self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
+
+        self.vocab_size = len(self.encoder)
+        self.all_special_ids = [self.encoder[t] for t in special_tokens]
+
+    def bpe(self, token):
+        if token in self.cache:
+            return self.cache[token]
+        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
+        pairs = get_pairs(word)
+
+        if not pairs:
+            return token+'</w>'
+
+        while True:
+            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
+            if bigram not in self.bpe_ranks:
+                break
+            first, second = bigram
+            new_word = []
+            i = 0
+            while i < len(word):
+                try:
+                    j = word.index(first, i)
+                    new_word.extend(word[i:j])
+                    i = j
+                except:
+                    new_word.extend(word[i:])
+                    break
+
+                if word[i] == first and i < len(word)-1 and word[i+1] == second:
+                    new_word.append(first+second)
+                    i += 2
+                else:
+                    new_word.append(word[i])
+                    i += 1
+            new_word = tuple(new_word)
+            word = new_word
+            if len(word) == 1:
+                break
+            else:
+                pairs = get_pairs(word)
+        word = ' '.join(word)
+        self.cache[token] = word
+        return word
+
+    def encode(self, text):
+        bpe_tokens = []
+        text = whitespace_clean(basic_clean(text)).lower()
+        for token in re.findall(self.pat, text):
+            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
+            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
+        return bpe_tokens
+
+    def decode(self, tokens):
+        text = ''.join([self.decoder[token] for token in tokens])
+        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
+        return text
+
+
+_tokenizer = SimpleTokenizer()
+
+def decode(output_ids: torch.Tensor):
+    output_ids = output_ids.cpu().numpy()
+    return _tokenizer.decode(output_ids)
+
+def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
+    """
+    Returns the tokenized representation of given input string(s)
+
+    Parameters
+    ----------
+    texts : Union[str, List[str]]
+        An input string or a list of input strings to tokenize
+    context_length : int
+        The context length to use; all CLIP models use 77 as the context length
+
+    Returns
+    -------
+    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
+    """
+    if isinstance(texts, str):
+        texts = [texts]
+
+    sot_token = _tokenizer.encoder["<start_of_text>"]
+    eot_token = _tokenizer.encoder["<end_of_text>"]
+    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
+    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
+
+    for i, tokens in enumerate(all_tokens):
+        if len(tokens) > context_length:
+            tokens = tokens[:context_length]  # Truncate
+            tokens[-1] = eot_token
+        result[i, :len(tokens)] = torch.tensor(tokens)
+
+    return result
+
+
+class HFTokenizer:
+    """HuggingFace tokenizer wrapper"""
+
+    def __init__(self, tokenizer_name: str):
+        from transformers import AutoTokenizer
+        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
+
+    def save_pretrained(self, dest):
+        self.tokenizer.save_pretrained(dest)
+
+    def __call__(self, texts: Union[str, List[str]], context_length: int = 77) -> torch.Tensor:
+        # same cleaning as for default tokenizer, except lowercasing
+        # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance
+        if isinstance(texts, str):
+            texts = [texts]
+        texts = [whitespace_clean(basic_clean(text)) for text in texts]
+        input_ids = self.tokenizer(
+            texts,
+            return_tensors='pt',
+            max_length=context_length,
+            padding='max_length',
+            truncation=True,
+        ).input_ids
+        return input_ids
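A small round-trip sketch for the tokenizer above (not part of the uploaded files): it shows that `tokenize` pads/truncates to `context_length` and that `SimpleTokenizer.decode` maps ids back to text with `</w>` turned into spaces. The module path, and treating id 0 as padding, are assumptions for illustration; it needs `ftfy`, `regex`, and the bundled BPE vocab on disk.

```python
# Hypothetical sketch: tokenize then decode, assuming the module imports as
# `model.open_clip.tokenizer` and that 0 only appears as padding here.
from model.open_clip.tokenizer import tokenize, _tokenizer

ids = tokenize(["a photo of a cat"], context_length=77)   # LongTensor, shape [1, 77]
sot, eot = _tokenizer.all_special_ids                      # <start_of_text>, <end_of_text>
content = [t for t in ids[0].tolist() if t not in (0, sot, eot)]
print(ids.shape)                   # torch.Size([1, 77])
print(_tokenizer.decode(content))  # roughly "a photo of a cat "
```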
model/open_clip/transformer.py ADDED
@@ -0,0 +1,736 @@
+import collections
+from collections import OrderedDict
+import math
+from typing import Callable, Optional, Sequence, Tuple
+from itertools import repeat
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.utils.checkpoint import checkpoint
+
+# From PyTorch internals
+def _ntuple(n):
+    def parse(x):
+        if isinstance(x, collections.abc.Iterable):
+            return x
+        return tuple(repeat(x, n))
+    return parse
+
+to_2tuple = _ntuple(2)
+
+
+class LayerNormFp32(nn.LayerNorm):
+    """Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
+
+    def forward(self, x: torch.Tensor):
+        orig_type = x.dtype
+        x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)
+        return x.to(orig_type)
+
+
+class LayerNorm(nn.LayerNorm):
+    """Subclass torch's LayerNorm (with cast back to input dtype)."""
+
+    def forward(self, x: torch.Tensor):
+        orig_type = x.dtype
+        x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+        return x.to(orig_type)
+
+
+class QuickGELU(nn.Module):
+    # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
+    def forward(self, x: torch.Tensor):
+        return x * torch.sigmoid(1.702 * x)
+
+
+class LayerScale(nn.Module):
+    def __init__(self, dim, init_values=1e-5, inplace=False):
+        super().__init__()
+        self.inplace = inplace
+        self.gamma = nn.Parameter(init_values * torch.ones(dim))
+
+    def forward(self, x):
+        return x.mul_(self.gamma) if self.inplace else x * self.gamma
+
+
+class PatchDropout(nn.Module):
+    """
+    https://arxiv.org/abs/2212.00794
+    """
+
+    def __init__(self, prob, exclude_first_token=True):
+        super().__init__()
+        assert 0 <= prob < 1.
+        self.prob = prob
+        self.exclude_first_token = exclude_first_token  # exclude CLS token
+
+    def forward(self, x):
+        if not self.training or self.prob == 0.:
+            return x
+
+        if self.exclude_first_token:
+            cls_tokens, x = x[:, :1], x[:, 1:]
+        else:
+            cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
+
+        batch = x.size()[0]
+        num_tokens = x.size()[1]
+
+        batch_indices = torch.arange(batch)
+        batch_indices = batch_indices[..., None]
+
+        keep_prob = 1 - self.prob
+        num_patches_keep = max(1, int(num_tokens * keep_prob))
+
+        rand = torch.randn(batch, num_tokens)
+        patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
+
+        x = x[batch_indices, patch_indices_keep]
+
+        if self.exclude_first_token:
+            x = torch.cat((cls_tokens, x), dim=1)
+
+        return x
+
+
+class Attention(nn.Module):
+    def __init__(
+            self,
+            dim,
+            num_heads=8,
+            qkv_bias=True,
+            scaled_cosine=False,
+            scale_heads=False,
+            logit_scale_max=math.log(1. / 0.01),
+            attn_drop=0.,
+            proj_drop=0.
+    ):
+        super().__init__()
+        self.scaled_cosine = scaled_cosine
+        self.scale_heads = scale_heads
+        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
+        self.num_heads = num_heads
+        self.head_dim = dim // num_heads
+        self.scale = self.head_dim ** -0.5
+        self.logit_scale_max = logit_scale_max
+
+        # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
+        self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
+        if qkv_bias:
+            self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
+        else:
+            self.in_proj_bias = None
+
+        if self.scaled_cosine:
+            self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
+        else:
+            self.logit_scale = None
+        self.attn_drop = nn.Dropout(attn_drop)
+        if self.scale_heads:
+            self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
+        else:
+            self.head_scale = None
+        self.out_proj = nn.Linear(dim, dim)
+        self.out_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
+        L, N, C = x.shape
+        q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
+        q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
+        k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
+        v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
+
+        if self.logit_scale is not None:
+            attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
+            logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
+            attn = attn.view(N, self.num_heads, L, L) * logit_scale
+            attn = attn.view(-1, L, L)
+        else:
+            q = q * self.scale
+            attn = torch.bmm(q, k.transpose(-1, -2))
+
+        if attn_mask is not None:
+            if attn_mask.dtype == torch.bool:
+                new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
+                new_attn_mask.masked_fill_(attn_mask, float("-inf"))
+                attn_mask = new_attn_mask
+            attn += attn_mask
+
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = torch.bmm(attn, v)
+        if self.head_scale is not None:
+            x = x.view(N, self.num_heads, L, C) * self.head_scale
+            x = x.view(-1, L, C)
+        x = x.transpose(0, 1).reshape(L, N, C)
+        x = self.out_proj(x)
+        x = self.out_drop(x)
+        return x
+
+
+class AttentionalPooler(nn.Module):
+    def __init__(
+            self,
+            d_model: int,
+            context_dim: int,
+            n_head: int = 8,
+            n_queries: int = 256,
+            norm_layer: Callable = LayerNorm
+    ):
+        super().__init__()
+        self.query = nn.Parameter(torch.randn(n_queries, d_model))
+        self.attn = nn.MultiheadAttention(d_model, n_head, kdim=context_dim, vdim=context_dim)
+        self.ln_q = norm_layer(d_model)
+        self.ln_k = norm_layer(context_dim)
+
+    def forward(self, x: torch.Tensor):
+        x = self.ln_k(x).permute(1, 0, 2)  # NLD -> LND
+        N = x.shape[1]
+        q = self.ln_q(self.query)
+        out = self.attn(self._repeat(q, N), x, x, need_weights=False)[0]
+        return out.permute(1, 0, 2)  # LND -> NLD
+
+    def _repeat(self, query, N: int):
+        return query.unsqueeze(1).repeat(1, N, 1)
+
+
+class ResidualAttentionBlock(nn.Module):
+    def __init__(
+            self,
+            d_model: int,
+            n_head: int,
+            mlp_ratio: float = 4.0,
+            ls_init_value: float = None,
+            act_layer: Callable = nn.GELU,
+            norm_layer: Callable = LayerNorm,
+            is_cross_attention: bool = False,
+    ):
+        super().__init__()
+
+        self.ln_1 = norm_layer(d_model)
+        self.attn = nn.MultiheadAttention(d_model, n_head)
+        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+        if is_cross_attention:
+            self.ln_1_kv = norm_layer(d_model)
+
+        self.ln_2 = norm_layer(d_model)
+        mlp_width = int(d_model * mlp_ratio)
+        self.mlp = nn.Sequential(OrderedDict([
+            ("c_fc", nn.Linear(d_model, mlp_width)),
+            ("gelu", act_layer()),
+            ("c_proj", nn.Linear(mlp_width, d_model))
+        ]))
+        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+
+    def attention(
+            self,
+            q_x: torch.Tensor,
+            k_x: Optional[torch.Tensor] = None,
+            v_x: Optional[torch.Tensor] = None,
+            attn_mask: Optional[torch.Tensor] = None,
+    ):
+        k_x = k_x if k_x is not None else q_x
+        v_x = v_x if v_x is not None else q_x
+
+        attn_mask = attn_mask.to(q_x.dtype) if attn_mask is not None else None
+        return self.attn(
+            q_x, k_x, v_x, need_weights=False, attn_mask=attn_mask
+        )[0]
+
+    def forward(
+            self,
+            q_x: torch.Tensor,
+            k_x: Optional[torch.Tensor] = None,
+            v_x: Optional[torch.Tensor] = None,
+            attn_mask: Optional[torch.Tensor] = None,
+    ):
+        k_x = self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
+        v_x = self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None
+
+        x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
+        x = x + self.ls_2(self.mlp(self.ln_2(x)))
+        return x
+
+
+class CustomResidualAttentionBlock(nn.Module):
+    def __init__(
+            self,
+            d_model: int,
+            n_head: int,
+            mlp_ratio: float = 4.0,
+            ls_init_value: float = None,
+            act_layer: Callable = nn.GELU,
+            norm_layer: Callable = LayerNorm,
+            scale_cosine_attn: bool = False,
+            scale_heads: bool = False,
+            scale_attn: bool = False,
+            scale_fc: bool = False,
+    ):
+        super().__init__()
+
+        self.ln_1 = norm_layer(d_model)
+        self.attn = Attention(
+            d_model, n_head,
+            scaled_cosine=scale_cosine_attn,
+            scale_heads=scale_heads,
+        )
+        self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity()
+        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+
+        self.ln_2 = norm_layer(d_model)
+        mlp_width = int(d_model * mlp_ratio)
+        self.mlp = nn.Sequential(OrderedDict([
+            ("c_fc", nn.Linear(d_model, mlp_width)),
+            ('ln', norm_layer(mlp_width) if scale_fc else nn.Identity()),
+            ("gelu", act_layer()),
+            ("c_proj", nn.Linear(mlp_width, d_model))
+        ]))
+        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+
+    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
+        x = x + self.ls_1(self.ln_attn(self.attn(self.ln_1(x), attn_mask=attn_mask)))
+        x = x + self.ls_2(self.mlp(self.ln_2(x)))
+        return x
+
+
+class Transformer(nn.Module):
+    def __init__(
+            self,
+            width: int,
+            layers: int,
+            heads: int,
+            mlp_ratio: float = 4.0,
+            ls_init_value: float = None,
+            act_layer: Callable = nn.GELU,
+            norm_layer: Callable = LayerNorm,
+    ):
+        super().__init__()
+        self.width = width
+        self.layers = layers
+        self.grad_checkpointing = False
+
+        self.resblocks = nn.ModuleList([
+            ResidualAttentionBlock(
+                width, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer)
+            for _ in range(layers)
+        ])
+
+    def get_cast_dtype(self) -> torch.dtype:
+        if hasattr(self.resblocks[0].mlp.c_fc, 'int8_original_dtype'):
+            return self.resblocks[0].mlp.c_fc.int8_original_dtype
+        return self.resblocks[0].mlp.c_fc.weight.dtype
+
+    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
+        for r in self.resblocks:
+            if self.grad_checkpointing and not torch.jit.is_scripting():
+                # TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372
+                x = checkpoint(r, x, None, None, attn_mask)
+            else:
+                x = r(x, attn_mask=attn_mask)
+        return x
+
+
+class VisionTransformer(nn.Module):
+    output_tokens: torch.jit.Final[bool]
+
+    def __init__(
+            self,
+            image_size: int,
+            patch_size: int,
+            width: int,
+            layers: int,
+            heads: int,
+            mlp_ratio: float,
+            ls_init_value: float = None,
+            global_average_pool: bool = False,
+            attentional_pool: bool = False,
+            n_queries: int = 256,
+            attn_pooler_heads: int = 8,
+            output_dim: int = 512,
+            patch_dropout: float = 0.,
+            input_patchnorm: bool = False,
+            act_layer: Callable = nn.GELU,
+            norm_layer: Callable = LayerNorm,
+            output_tokens: bool = False
+    ):
+        super().__init__()
+        self.output_tokens = output_tokens
+        image_height, image_width = self.image_size = to_2tuple(image_size)
+        patch_height, patch_width = self.patch_size = to_2tuple(patch_size)
+        self.grid_size = (image_height // patch_height, image_width // patch_width)
+        self.output_dim = output_dim
+
+        # whether to layernorm each patch, as done in dual patchnorm paper - https://arxiv.org/abs/2302.01327v1
+        self.input_patchnorm = input_patchnorm
+
+        if input_patchnorm:
+            patch_input_dim = patch_height * patch_width * 3
+            self.patchnorm_pre_ln = LayerNorm(patch_input_dim)
+            self.conv1 = nn.Linear(patch_input_dim, width)
+        else:
+            self.patchnorm_pre_ln = nn.Identity()
+            self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
+
+        # class embeddings and positional embeddings
+        scale = width ** -0.5
+        self.class_embedding = nn.Parameter(scale * torch.randn(width))
+        self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))
+
+        # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
+        self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
+
+        self.ln_pre = norm_layer(width)
+        self.transformer = Transformer(
+            width,
+            layers,
+            heads,
+            mlp_ratio,
+            ls_init_value=ls_init_value,
+            act_layer=act_layer,
+            norm_layer=norm_layer,
+        )
+
+        self.global_average_pool = global_average_pool
+        if attentional_pool:
+            self.attn_pool = AttentionalPooler(output_dim, width, n_head=attn_pooler_heads, n_queries=n_queries)
+            self.ln_post = norm_layer(output_dim)
+            self.proj = nn.Parameter(scale * torch.randn(output_dim, output_dim))
+        else:
+            self.attn_pool = None
+            self.ln_post = norm_layer(width)
+            self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
+
+        self.init_parameters()
+
+    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
+        for param in self.parameters():
+            param.requires_grad = False
+
+        if unlocked_groups != 0:
+            groups = [
+                [
+                    self.conv1,
+                    self.class_embedding,
+                    self.positional_embedding,
+                    self.ln_pre,
+                ],
+                *self.transformer.resblocks[:-1],
+                [
+                    self.transformer.resblocks[-1],
+                    self.ln_post,
+                ],
+                self.proj,
+            ]
+
+            def _unlock(x):
+                if isinstance(x, Sequence):
+                    for g in x:
+                        _unlock(g)
+                else:
+                    if isinstance(x, torch.nn.Parameter):
+                        x.requires_grad = True
+                    else:
+                        for p in x.parameters():
+                            p.requires_grad = True
+
+            _unlock(groups[-unlocked_groups:])
+
+    def init_parameters(self):
+        # FIXME OpenAI CLIP did not define an init for the VisualTransformer
+        # TODO experiment if default PyTorch init, below, or alternate init is best.
+
+        # nn.init.normal_(self.class_embedding, std=self.scale)
+        # nn.init.normal_(self.positional_embedding, std=self.scale)
+        #
+        # proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
+        # attn_std = self.transformer.width ** -0.5
+        # fc_std = (2 * self.transformer.width) ** -0.5
+        # for block in self.transformer.resblocks:
+        #     nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
+        #     nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
+        #     nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
+        #     nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
+        #
+        # if self.text_projection is not None:
+        #     nn.init.normal_(self.text_projection, std=self.scale)
+        pass
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        self.transformer.grad_checkpointing = enable
+
+    def _global_pool(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+        if self.global_average_pool:
+            return x.mean(dim=1), x
+        else:
+            return x[:, 0], x[:, 1:]
+
+    def forward(self, x: torch.Tensor):
+
+        # to patches - whether to use dual patchnorm - https://arxiv.org/abs/2302.01327v1
+        if self.input_patchnorm:
+            # einops - rearrange(x, 'b c (h p1) (w p2) -> b (h w) (c p1 p2)')
+            x = x.reshape(x.shape[0], x.shape[1], self.grid_size[0], self.patch_size[0], self.grid_size[1], self.patch_size[1])
+            x = x.permute(0, 2, 4, 1, 3, 5)
+            x = x.reshape(x.shape[0], self.grid_size[0] * self.grid_size[1], -1)
+            x = self.patchnorm_pre_ln(x)
+            x = self.conv1(x)
+        else:
+            x = self.conv1(x)  # shape = [*, width, grid, grid]
+            x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
+            x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
+
+        # class embeddings and positional embeddings
+        x = torch.cat(
+            [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
+             x], dim=1)  # shape = [*, grid ** 2 + 1, width]
+        x = x + self.positional_embedding.to(x.dtype)
+
+        # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
+        x = self.patch_dropout(x)
+        x = self.ln_pre(x)
+
+        x = x.permute(1, 0, 2)  # NLD -> LND
+        x = self.transformer(x)
+        x = x.permute(1, 0, 2)  # LND -> NLD
+
+        if self.attn_pool is not None:
+            x = self.attn_pool(x)
+            x = self.ln_post(x)
+            pooled, tokens = self._global_pool(x)
+        else:
+            pooled, tokens = self._global_pool(x)
+            pooled = self.ln_post(pooled)
+
+        if self.proj is not None:
+            pooled = pooled @ self.proj
+
+        if self.output_tokens:
+            return pooled, tokens
+
+        return pooled
+
+
+class TextTransformer(nn.Module):
+    output_tokens: torch.jit.Final[bool]
+
+    def __init__(
+            self,
+            context_length: int = 77,
+            vocab_size: int = 49408,
+            width: int = 512,
+            heads: int = 8,
+            layers: int = 12,
+            ls_init_value: float = None,
+            output_dim: int = 512,
+            act_layer: Callable = nn.GELU,
+            norm_layer: Callable = LayerNorm,
+            embed_cls: bool = False,
+            pad_id: int = 0,
+            output_tokens: bool = False,
+    ):
+        super().__init__()
+        self.output_tokens = output_tokens
+        self.num_pos = self.context_length = context_length
+        self.vocab_size = vocab_size
+        self.width = width
+        self.output_dim = output_dim
+        self.heads = heads
+        self.pad_id = pad_id
+
+        self.text_projection = nn.Parameter(torch.empty(width, output_dim))
+
+        if embed_cls:
+            self.cls_emb = nn.Parameter(torch.empty(width))
+            self.num_pos += 1
+        else:
+            self.cls_emb = None
+
+        self.token_embedding = nn.Embedding(vocab_size, width)
+        self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))
+        self.transformer = Transformer(
+            width=width,
+            layers=layers,
+            heads=heads,
+            ls_init_value=ls_init_value,
+            act_layer=act_layer,
+            norm_layer=norm_layer,
+        )
+        self.ln_final = norm_layer(width)
+
+        self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
+
+        self.init_parameters()
+
+    def init_parameters(self):
+        nn.init.normal_(self.token_embedding.weight, std=0.02)
+        nn.init.normal_(self.positional_embedding, std=0.01)
+        if self.cls_emb is not None:
+            nn.init.normal_(self.cls_emb, std=0.01)
+
+        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
+        attn_std = self.transformer.width ** -0.5
+        fc_std = (2 * self.transformer.width) ** -0.5
+        for block in self.transformer.resblocks:
+            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
+            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
+            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
+            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
+
+        if self.text_projection is not None:
+            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        self.transformer.grad_checkpointing = enable
+
+    def build_attention_mask(self):
+        # lazily create causal attention mask, with full attention between the tokens
+        # pytorch uses additive attention mask; fill with -inf
+        mask = torch.empty(self.num_pos, self.num_pos)
+        mask.fill_(float("-inf"))
+        mask.triu_(1)  # zero out the lower diagonal
+        return mask
+
+    def build_cls_mask(self, text, cast_dtype: torch.dtype):
+        cls_mask = (text != self.pad_id).unsqueeze(1)
+        cls_mask = F.pad(cls_mask, (1, 0, cls_mask.shape[2], 0), value=1.0)
+        additive_mask = torch.empty(cls_mask.shape, dtype=cast_dtype, device=cls_mask.device)
+        additive_mask.fill_(0)
+        additive_mask.masked_fill_(~cls_mask, float("-inf"))
+        additive_mask = torch.repeat_interleave(additive_mask, self.heads, 0)
+        return additive_mask
+
+    def _repeat(self, t, N: int):
+        return t.reshape(1, 1, -1).repeat(N, 1, 1)
+
+    def forward(self, text):
+        cast_dtype = self.transformer.get_cast_dtype()
+        seq_len = text.shape[1]
+
+        x = self.token_embedding(text).to(cast_dtype)  # [batch_size, n_ctx, d_model]
+        attn_mask = self.attn_mask
+        if self.cls_emb is not None:
+            seq_len += 1
+            x = torch.cat([x, self._repeat(self.cls_emb, x.shape[0])], dim=1)
+            cls_mask = self.build_cls_mask(text, cast_dtype)
+            attn_mask = attn_mask[None, :seq_len, :seq_len] + cls_mask[:, :seq_len, :seq_len]
+
+        x = x + self.positional_embedding[:seq_len].to(cast_dtype)
+        x = x.permute(1, 0, 2)  # NLD -> LND
+        x = self.transformer(x, attn_mask=attn_mask)
+        x = x.permute(1, 0, 2)  # LND -> NLD
+
+        # x.shape = [batch_size, n_ctx, transformer.width]
+        # take features from the eot embedding (eot_token is the highest number in each sequence)
+        if self.cls_emb is not None:
+            pooled, tokens = x[:, -1], x[:, :-1]
+            pooled = self.ln_final(pooled)
+        else:
+            x = self.ln_final(x)
+            pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x
+
+        if self.text_projection is not None:
+            pooled = pooled @ self.text_projection
+
+        if self.output_tokens:
+            return pooled, tokens
+
+        return pooled
+
+
+class MultimodalTransformer(Transformer):
+    def __init__(
+            self,
+            width: int,
+            layers: int,
+            heads: int,
+            context_length: int = 77,
+            mlp_ratio: float = 4.0,
+            ls_init_value: float = None,
+            act_layer: Callable = nn.GELU,
+            norm_layer: Callable = LayerNorm,
+            output_dim: int = 512,
+    ):
+
+        super().__init__(
+            width=width,
+            layers=layers,
+            heads=heads,
+            mlp_ratio=mlp_ratio,
+            ls_init_value=ls_init_value,
+            act_layer=act_layer,
+            norm_layer=norm_layer,
+        )
+        self.context_length = context_length
+        self.cross_attn = nn.ModuleList([
+            ResidualAttentionBlock(
+                width,
+                heads,
+                mlp_ratio,
+                ls_init_value=ls_init_value,
+                act_layer=act_layer,
+                norm_layer=norm_layer,
+                is_cross_attention=True,
+            )
+            for _ in range(layers)
+        ])
+
+        self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
+
+        self.ln_final = norm_layer(width)
+        self.text_projection = nn.Parameter(torch.empty(width, output_dim))
+
+    def init_parameters(self):
+        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
+        attn_std = self.transformer.width ** -0.5
+        fc_std = (2 * self.transformer.width) ** -0.5
+        for block in self.transformer.resblocks:
+            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
+            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
+            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
+            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
+        for block in self.transformer.cross_attn:
+            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
+            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
+            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
+            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
+
+        if self.text_projection is not None:
+            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
+
+    def build_attention_mask(self):
+        # lazily create causal attention mask, with full attention between the tokens
+        # pytorch uses additive attention mask; fill with -inf
+        mask = torch.empty(self.context_length, self.context_length)
+        mask.fill_(float("-inf"))
+        mask.triu_(1)  # zero out the lower diagonal
+        return mask
+
+    def forward(self, image_embs, text_embs):
+        text_embs = text_embs.permute(1, 0, 2)  # NLD -> LND
+        image_embs = image_embs.permute(1, 0, 2)  # NLD -> LND
+        seq_len = text_embs.shape[0]
+
+        for resblock, cross_attn in zip(self.resblocks, self.cross_attn):
+            if self.grad_checkpointing and not torch.jit.is_scripting():
+                # TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372
+                text_embs = checkpoint(resblock, text_embs, None, None, self.attn_mask[:seq_len, :seq_len])
+                text_embs = checkpoint(cross_attn, text_embs, image_embs, image_embs, None)
+            else:
+                text_embs = resblock(text_embs, attn_mask=self.attn_mask[:seq_len, :seq_len])
+                text_embs = cross_attn(text_embs, k_x=image_embs, v_x=image_embs)
+
+        x = text_embs.permute(1, 0, 2)  # LND -> NLD
+        x = self.ln_final(x)
+
+        if self.text_projection is not None:
+            x = x @ self.text_projection
+
+        return x
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        self.grad_checkpointing = enable