from abc import ABC, abstractmethod
import re

import torch
import torch.nn as nn
from transformers import Blip2Model, Blip2Processor, Blip2Config

from .mm_utils import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
# NOTE: KeywordsStoppingCriteria (used in prepare_for_inference below) is
# assumed to live in .mm_utils alongside the other helpers.
from .mm_utils import conv_templates, KeywordsStoppingCriteria


class BLIP2VisionTower(nn.Module):
    def __init__(self, vision_tower):
        super().__init__()
        self.vision_tower_name = vision_tower
        self.cfg_only = Blip2Config.from_pretrained(self.vision_tower_name)
        # Build the full BLIP-2 model from its config, then drop the language
        # model and its projection: only the ViT encoder and Q-Former are kept.
        self.vision_tower = Blip2Model(self.cfg_only)
        del self.vision_tower.language_model
        del self.vision_tower.language_projection
        self.vision_tower.to(dtype=torch.float16)
        self.image_processor = Blip2Processor.from_pretrained(self.vision_tower_name)

    def feature_select(self, image_forward_outs):
        # Use the Q-Former's last hidden state as the image features.
        image_features = image_forward_outs.last_hidden_state
        return image_features

    @torch.no_grad()
    def forward(self, images):
        if type(images) is list:
            image_features = []
            for image in images:
                image_forward_out = self.vision_tower.get_qformer_features(image.to(device=self.device).unsqueeze(0))
                image_feature = self.feature_select(image_forward_out).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_forward_outs = self.vision_tower.get_qformer_features(images.to(device=self.device))
            image_features = self.feature_select(image_forward_outs).to(images.dtype)
        return image_features

    @property
    def dummy_feature(self):
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        return self.vision_tower.dtype

    @property
    def device(self):
        return self.vision_tower.device

    @property
    def config(self):
        return self.cfg_only

    @property
    def hidden_size(self):
        return self.config.qformer_config.hidden_size

    @property
    def num_patches(self):
        # The Q-Former emits one feature per learned query token, so the
        # effective "patch" count is the number of query tokens.
        return self.config.num_query_tokens

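# Minimal usage sketch for BLIP2VisionTower (illustrative: the checkpoint name
# is an assumption; note the tower is built from the config alone, so weights
# are presumably loaded later from the enclosing model checkpoint):
#
#   tower = BLIP2VisionTower("Salesforce/blip2-opt-2.7b").cuda()
#   pixels = tower.image_processor(pil_image, return_tensors='pt')['pixel_values']
#   feats = tower(pixels.half().cuda())
#   # feats: (1, num_query_tokens, qformer_hidden_size), e.g. (1, 32, 768)
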
class IdentityMap(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x, *args, **kwargs):
        return x

    @property
    def config(self):
        return {"mm_projector_type": 'identity'}


def build_vision_projector(config, delay_load=False, **kwargs):
    projector_type = getattr(config, 'mm_projector_type', 'linear')

    if projector_type == 'linear':
        return nn.Linear(config.mm_hidden_size, config.hidden_size)

    mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
    if mlp_gelu_match:
        mlp_depth = int(mlp_gelu_match.group(1))
        modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
        for _ in range(1, mlp_depth):
            modules.append(nn.GELU())
            modules.append(nn.Linear(config.hidden_size, config.hidden_size))
        return nn.Sequential(*modules)

    if projector_type == 'identity':
        return IdentityMap()

    raise ValueError(f'Unknown projector type: {projector_type}')

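# For illustration, with mm_hidden_size=768 and hidden_size=4096 (values are
# assumptions; in practice they come from the model config):
#   'linear'     -> nn.Linear(768, 4096)
#   'mlp2x_gelu' -> nn.Sequential(Linear(768, 4096), GELU(), Linear(4096, 4096))
#   'identity'   -> IdentityMap()
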
class MMAlayaMetaModel:

    def __init__(self, config):
        # Mixin: super() resolves to the transformer model class in the
        # concrete subclass's MRO, which consumes `config`.
        super(MMAlayaMetaModel, self).__init__(config)
        self.vision_tower = BLIP2VisionTower(config._name_or_path)
        self.mm_projector = build_vision_projector(config)

    def get_vision_tower(self):
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower


class MMAlayaMetaForCausalLM(ABC):

    @abstractmethod
    def get_model(self):
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def encode_images(self, images):
        # Q-Former features -> mm_projector -> LLM embedding space.
        image_features = self.get_model().get_vision_tower()(images)
        image_features = self.get_model().mm_projector(image_features.to(dtype=self.dtype))
        return image_features

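    # Shape sketch for encode_images (sizes illustrative): the Q-Former yields
    # (B, num_query_tokens, qformer_hidden), e.g. (B, 32, 768); mm_projector
    # maps the last dimension to the LLM's hidden size, giving e.g. (B, 32, 4096).
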
    def prepare_inputs_labels_for_multimodal(
        self, input_ids, position_ids, attention_mask, past_key_values, labels, images
    ):
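        """
        Splice projected image features into the text embedding sequence.

        Every IMAGE_TOKEN_INDEX placeholder in input_ids is replaced by the
        corresponding image's feature vectors, with labels over those positions
        set to IGNORE_INDEX. Sequences are then truncated to
        tokenizer_model_max_length (if set) and re-padded on the configured
        side, rebuilding attention_mask and position_ids. Returns
        (input_ids, position_ids, attention_mask, past_key_values,
        inputs_embeds, labels); after splicing, input_ids is None and the
        model should consume inputs_embeds instead.
        """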
        vision_tower = self.get_vision_tower()
        if vision_tower is None or images is None or input_ids.shape[1] == 1:
            # Text-only input, or a single-token decoding step with a KV cache:
            # no splicing needed, but the attention mask and position ids must
            # be extended to cover the cached multimodal prefix.
            if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
                target_shape = past_key_values[-1][-1].shape[-2] + 1
                attention_mask = torch.cat((attention_mask, torch.ones(
                    (attention_mask.shape[0], target_shape - attention_mask.shape[1]),
                    dtype=attention_mask.dtype,
                    device=attention_mask.device
                )), dim=1)
                position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
            return input_ids, position_ids, attention_mask, past_key_values, None, labels

        if type(images) is list or images.ndim == 5:
            # A list (or 5-D batch) of multi-image samples: encode all images
            # in one pass, then split the features back per sample.
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.encode_images(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            image_features = [x.flatten(0, 1).to(self.device) for x in image_features]
        else:
            image_features = self.encode_images(images).to(self.device)

        # Tuning the adapter together with im_start/im_end tokens is not supported.
        if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
            raise NotImplementedError

        # Keep the original (possibly None) inputs so they can be restored at
        # the end, then substitute defaults for the computation below.
        _labels = labels
        _position_ids = position_ids
        _attention_mask = attention_mask
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
        else:
            attention_mask = attention_mask.bool()
        if position_ids is None:
            position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
        if labels is None:
            labels = torch.full_like(input_ids, IGNORE_INDEX)

        # Strip padding so each sequence is handled at its true length.
        input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
        labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]

        new_input_embeds = []
        new_labels = []
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
            if num_images == 0:
                # No image placeholder: concatenating a zero-length slice of
                # the image features leaves the sequence unchanged while still
                # touching the vision branch in the graph.
                cur_image_features = image_features[cur_image_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
                new_input_embeds.append(cur_input_embeds)
                new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue

            # Split the sequence at each image placeholder; the -1 sentinel and
            # the sequence-length bound give len(indices) - 1 text segments.
            image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
            cur_input_ids_noim = []
            cur_labels = labels[batch_idx]
            cur_labels_noim = []
            for i in range(len(image_token_indices) - 1):
                cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]])
                cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]])
            split_sizes = [x.shape[0] for x in cur_labels_noim]
            # Embed all text segments in one call, then split back per segment.
            cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
            cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
            cur_new_input_embeds = []
            cur_new_labels = []

            # Interleave text segments with image features; labels over image
            # positions are masked with IGNORE_INDEX.
            for i in range(num_images + 1):
                cur_new_input_embeds.append(cur_input_embeds_no_im[i])
                cur_new_labels.append(cur_labels_noim[i])
                if i < num_images:
                    cur_image_features = image_features[cur_image_idx]
                    cur_image_idx += 1
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))

            cur_new_input_embeds = torch.cat(cur_new_input_embeds)
            cur_new_labels = torch.cat(cur_new_labels)

            new_input_embeds.append(cur_new_input_embeds)
            new_labels.append(cur_new_labels)

        # Truncate to the tokenizer's max length, if configured (this may cut
        # into spliced image features).
        tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)
        if tokenizer_model_max_length is not None:
            new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds]
            new_labels = [x[:tokenizer_model_max_length] for x in new_labels]

        # Re-pad everything to the longest spliced sequence in the batch.
        max_len = max(x.shape[0] for x in new_input_embeds)
        batch_size = len(new_input_embeds)

        new_input_embeds_padded = []
        new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
        attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
        position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)

        for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
            cur_len = cur_new_embed.shape[0]
            if getattr(self.config, 'tokenizer_padding_side', 'right') == "left":
                # Left padding: zero embeddings first, content right-aligned.
                new_input_embeds_padded.append(torch.cat((
                    torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device),
                    cur_new_embed
                ), dim=0))
                if cur_len > 0:
                    new_labels_padded[i, -cur_len:] = cur_new_labels
                    attention_mask[i, -cur_len:] = True
                    position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
            else:
                # Right padding (default): content first, zeros appended.
                new_input_embeds_padded.append(torch.cat((
                    cur_new_embed,
                    torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)
                ), dim=0))
                if cur_len > 0:
                    new_labels_padded[i, :cur_len] = cur_new_labels
                    attention_mask[i, :cur_len] = True
                    position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)

        new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)

        # Restore None for whatever the caller originally passed as None.
        if _labels is None:
            new_labels = None
        else:
            new_labels = new_labels_padded

        if _attention_mask is None:
            attention_mask = None
        else:
            attention_mask = attention_mask.to(dtype=_attention_mask.dtype)

        if _position_ids is None:
            position_ids = None

        # input_ids come back as None: the model consumes inputs_embeds instead.
        return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels
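
    # Worked example (illustrative, with num_query_tokens=32): an input of
    #   [bos, "What", "is", IMAGE_TOKEN_INDEX, "?"]
    # becomes the embedding sequence
    #   [emb(bos), emb("What"), emb("is"), <32 projected query features>, emb("?")]
    # with labels set to IGNORE_INDEX over the 32 inserted positions.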

    def initialize_tokenizer(self, tokenizer):
        # Register the image placeholder as a special token and grow the
        # embedding matrix to match the enlarged vocabulary.
        tokenizer.add_tokens([DEFAULT_IMAGE_TOKEN], special_tokens=True)
        self.resize_token_embeddings(len(tokenizer))

    def prepare_for_inference(
        self,
        prompt,
        tokenizer,
        image,
        image_token_index=IMAGE_TOKEN_INDEX,
        return_tensors=None
    ):
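        """
        Build generation inputs for one image-plus-prompt pair: wraps the
        prompt in the mmalaya_llama conversation template, tokenizes around
        the '<image>' placeholder, and returns
        (input_ids, image_tensor, stopping_criteria).
        """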
        # Wrap the user prompt in the conversation template, with the image
        # placeholder prepended.
        conv = conv_templates["mmalaya_llama"].copy()
        inp = DEFAULT_IMAGE_TOKEN + '\n' + prompt
        conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        # Tokenize the text on either side of '<image>' separately, then stitch
        # the chunks back together with image_token_index in between.
        prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]

        def insert_separator(X, sep):
            return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]

        input_ids = []
        offset = 0
        if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
            # Keep a single BOS token; the offset skips the BOS the tokenizer
            # prepends to every chunk.
            offset = 1
            input_ids.append(prompt_chunks[0][0])

        for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
            input_ids.extend(x[offset:])

        # Optionally convert to a tensor; only PyTorch tensors are supported.
        # The conversion happens in place so the stopping criteria and image
        # tensor below are still prepared.
        if return_tensors is not None:
            if return_tensors == 'pt':
                input_ids = torch.tensor(input_ids, dtype=torch.long)
            else:
                raise ValueError(f'Unsupported tensor type: {return_tensors}')

        # Stop generation at the conversation separator.
        stopping_criteria = KeywordsStoppingCriteria([conv.sep2], tokenizer, input_ids)

        # Preprocess the image with the vision tower's BLIP-2 processor.
        image_processor = self.get_vision_tower().image_processor
        image_tensor = image_processor(image, return_tensors='pt')['pixel_values'].half().cuda()

        return input_ids, image_tensor, stopping_criteria
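

# End-to-end sketch (illustrative; the concrete MMAlaya model class and its
# generate() wiring are assumptions, not defined in this module):
#
#   input_ids, image_tensor, stopping_criteria = model.prepare_for_inference(
#       "Describe this image.", tokenizer, image, return_tensors='pt')
#   output_ids = model.generate(
#       input_ids.unsqueeze(0).cuda(),
#       images=image_tensor,
#       stopping_criteria=[stopping_criteria],
#   )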