import torch
import torch.nn as nn
import numpy as np
from functools import partial

from lib.model_zoo.common.get_model import register

version = '0'
symbol = 'clip'


class AbstractEncoder(nn.Module):
    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        raise NotImplementedError

from transformers import CLIPTokenizer, CLIPTextModel


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self
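
# A minimal sketch (not part of this module) of the pattern the frozen encoders
# below rely on: after switching a module to eval mode, overwrite its `.train`
# attribute with `disabled_train` so that later train()/eval() toggles no longer
# change its mode.
#
#     encoder = SomeEncoder().eval()     # hypothetical module
#     encoder.train = disabled_train     # train/eval toggles are now no-ops
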
@register('clip_text_frozen', version)
class FrozenCLIPTextEmbedder(AbstractEncoder):
    """Uses the CLIP transformer encoder for text (from huggingface)."""
    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
        super().__init__()
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        self.transformer = CLIPTextModel.from_pretrained(version)
        self.device = device
        self.max_length = max_length
        self.freeze()

    def freeze(self):
        self.transformer = self.transformer.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.device)
        outputs = self.transformer(input_ids=tokens)
        z = outputs.last_hidden_state
        return z

    def encode(self, text):
        return self(text)
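
# Usage sketch (assumes a CUDA device and the default HuggingFace weights):
#
#     embedder = FrozenCLIPTextEmbedder().to('cuda')
#     z = embedder.encode(["a photograph of an astronaut riding a horse"])
#     # z: [batch, max_length, hidden] = [1, 77, 768] for clip-vit-large-patch14
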
from transformers import CLIPProcessor, CLIPModel


@register('clip_frozen', version)
class FrozenCLIP(AbstractEncoder):
    def __init__(self,
                 version="openai/clip-vit-large-patch14",
                 max_length=77,
                 encode_type='encode_text',
                 fp16=False):
        super().__init__()
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        self.processor = CLIPProcessor.from_pretrained(version)
        self.model = CLIPModel.from_pretrained(version)
        self.max_length = max_length
        self.encode_type = encode_type
        self.fp16 = fp16
        self.freeze()

    def get_device(self):
        # A trick to get the model's device without storing it explicitly.
        return self.model.text_projection.weight.device

    def freeze(self):
        self.model = self.model.eval()
        # Make later train()/eval() calls no-ops so the model stays in eval mode.
        self.train = disabled_train
        for param in self.parameters():
            param.requires_grad = False

    def encode_text_pooled(self, text):
        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.get_device())
        outputs = self.model.get_text_features(input_ids=tokens)
        return outputs

    def encode_vision_pooled(self, images):
        inputs = self.processor(images=images, return_tensors="pt")
        pixels = inputs['pixel_values'].half() if self.fp16 else inputs['pixel_values']
        pixels = pixels.to(self.get_device())
        return self.model.get_image_features(pixel_values=pixels)

    def encode_text_noproj(self, text):
        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.get_device())
        outputs = self.model.text_model(input_ids=tokens)
        return outputs.last_hidden_state

    def encode_vision_noproj(self, images):
        inputs = self.processor(images=images, return_tensors="pt")
        pixels = inputs['pixel_values'].half() if self.fp16 else inputs['pixel_values']
        pixels = pixels.to(self.get_device())
        outputs = self.model.vision_model(pixel_values=pixels)
        return outputs.last_hidden_state

    def encode_text(self, text):
        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.get_device())
        outputs = self.model.text_model(input_ids=tokens)
        # Project the per-token hidden states, then scale the whole sequence by the
        # norm of the projected pooled (EOT) embedding.
        z = self.model.text_projection(outputs.last_hidden_state)
        z_pooled = self.model.text_projection(outputs.pooler_output)
        z = z / torch.norm(z_pooled.unsqueeze(1), dim=-1, keepdim=True)
        return z

    def encode_vision(self, images):
        z = self.encode_vision_noproj(images)
        z = self.model.vision_model.post_layernorm(z)
        z = self.model.visual_projection(z)
        # The first token is the projected CLS embedding; its norm scales the full sequence.
        z_pooled = z[:, 0:1]
        z = z / torch.norm(z_pooled, dim=-1, keepdim=True)
        return z

    def encode(self, *args, **kwargs):
        return getattr(self, self.encode_type)(*args, **kwargs)
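
# Usage sketch: `encode_type` selects which of the methods above `encode` dispatches
# to, so the same registered class can serve as either a text or an image encoder.
#
#     clip_text = FrozenCLIP(encode_type='encode_text')
#     z = clip_text.encode(["a corgi wearing sunglasses"])      # [1, 77, 768], norm-scaled
#
#     clip_vision = FrozenCLIP(encode_type='encode_vision_pooled')
#     feats = clip_vision.encode(pil_image)                      # pil_image: a PIL.Image (hypothetical)
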
@register('clip_vision_frozen_justin', version)
class FrozenCLIPVisionEmbedder_Justin(AbstractEncoder):
    """
    Uses the CLIP image encoder.
    """
    def __init__(
            self,
            model='ViT-L/14',
            jit=False,
            device='cuda' if torch.cuda.is_available() else 'cpu',
            antialias=False,
    ):
        super().__init__()
        from . import clip_justin
        self.model, _ = clip_justin.load(name=model, device=device, jit=jit)
        self.device = device
        self.antialias = antialias

        self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
        self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)

        self.freeze()

    def freeze(self):
        self.model = self.model.eval()
        for param in self.parameters():
            param.requires_grad = False

    def preprocess(self, x):
        import kornia
        # Expect x in [-1, 1]: resize to CLIP's 224x224 input, rescale to [0, 1],
        # then apply the CLIP normalization statistics.
        x = kornia.geometry.resize(x, (224, 224),
                                   interpolation='bicubic', align_corners=True,
                                   antialias=self.antialias)
        x = (x + 1.) / 2.
        x = kornia.enhance.normalize(x, self.mean, self.std)
        return x

    def forward(self, x):
        return self.model.encode_image(self.preprocess(x)).float()

    def encode(self, im):
        return self(im).unsqueeze(1)
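
# Usage sketch (assumes a GPU and that the clip_justin ViT-L/14 weights are available):
#
#     embedder = FrozenCLIPVisionEmbedder_Justin().to('cuda')
#     x = torch.randn(4, 3, 256, 256, device='cuda')   # placeholder; real inputs are RGB in [-1, 1]
#     z = embedder.encode(x)                            # [4, 1, 768] CLIP image embeddings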