# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional

import torch
from transformers import PreTrainedModel

from xtuner.model.utils import *
from xtuner.utils import IGNORE_INDEX, IMAGE_TOKEN_INDEX


def prepare_inputs_labels_for_multimodal_with_visual_prompts(
        llm: PreTrainedModel,
        input_ids: torch.LongTensor = None,
        position_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        labels: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        region_id: Optional[int] = None,
        regions_feats: Optional[torch.FloatTensor] = None,
        mark_id: Optional[int] = None,
        mark_feats: Optional[torch.FloatTensor] = None,
        **kwargs,
):
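    """Pack text, image, region and mark embeddings into one sequence.

    Besides the usual expansion of ``IMAGE_TOKEN_INDEX`` into image patch
    embeddings, this variant splices the pre-computed ``regions_feats`` /
    ``mark_feats`` rows in at every ``region_id`` / ``mark_id``
    placeholder, then right-pads the batch and rebuilds
    ``attention_mask``, ``position_ids`` and ``labels``.
    """
    # Text-only batch: nothing to splice, pass everything through.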
    if pixel_values is None:
        return {
            'input_ids': input_ids,
            'position_ids': position_ids,
            'attention_mask': attention_mask,
            'past_key_values': past_key_values,
            'inputs_embeds': None,
            'labels': labels
        }
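
    # Remember which optional inputs were omitted so their None-ness can
    # be restored before returning.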
    _labels = labels
    _position_ids = position_ids
    _attention_mask = attention_mask
    if attention_mask is None:
        attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
    else:
        attention_mask = attention_mask.bool()
    if position_ids is None:
        position_ids = torch.arange(
            0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
    if labels is None:
        labels = torch.full_like(input_ids, IGNORE_INDEX)
    # remove the padding using attention_mask -- TODO: double check
    input_ids = [
        cur_input_ids[cur_attention_mask]
        for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)
    ]
    labels = [
        cur_labels[cur_attention_mask]
        for cur_labels, cur_attention_mask in zip(labels, attention_mask)
    ]

    new_inputs_embeds = []
    new_labels = []
    cur_image_idx = 0
    cur_region_idx = 0
    cur_mark_idx = 0
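    # Re-embed each sample, expanding every image/region/mark placeholder
    # into its corresponding feature rows.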
    for batch_idx, cur_input_ids in enumerate(input_ids):
        num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
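        # No image tokens in this sample: embed the text as-is, but
        # concatenate a zero-length slice of the (unused) pixel features
        # so the visual branch stays in the autograd graph.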
        if num_images == 0:
            cur_pixel_values = pixel_values[cur_image_idx]
            cur_inputs_embeds_1 = llm.get_input_embeddings()(cur_input_ids)
            cur_inputs_embeds = torch.cat(
                [cur_inputs_embeds_1, cur_pixel_values[0:0]], dim=0)
            new_inputs_embeds.append(cur_inputs_embeds)
            new_labels.append(labels[batch_idx])
            cur_image_idx += 1
            continue
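
        # Positions holding an image, region or mark placeholder are all
        # replaced with feature embeddings below.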
        need_replace = cur_input_ids == IMAGE_TOKEN_INDEX
        need_replace = torch.logical_or(need_replace,
                                        cur_input_ids == region_id)
        need_replace = torch.logical_or(need_replace,
                                        cur_input_ids == mark_id)
        num_replace = need_replace.sum()
        replace_type = cur_input_ids[need_replace]
        image_token_indices = [-1] + torch.where(need_replace)[0].tolist() + [
            cur_input_ids.shape[0]
        ]
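
        # Split the sequence at every placeholder and embed only the plain
        # text chunks; placeholder ids never reach the embedding table.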
        cur_input_ids_noim = []
        cur_labels = labels[batch_idx]
        cur_labels_noim = []
        for i in range(len(image_token_indices) - 1):
            start = image_token_indices[i] + 1
            end = image_token_indices[i + 1]
            cur_input_ids_noim.append(cur_input_ids[start:end])
            cur_labels_noim.append(cur_labels[start:end])
        split_sizes = [x.shape[0] for x in cur_labels_noim]
        cur_inputs_embeds = llm.get_input_embeddings()(
            torch.cat(cur_input_ids_noim))
        cur_inputs_embeds_no_im = torch.split(
            cur_inputs_embeds, split_sizes, dim=0)
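
        # Interleave text chunks with feature rows: each image contributes
        # all of its patch embeddings, while each region or mark prompt
        # contributes a single row. Inserted rows are excluded from the
        # loss via IGNORE_INDEX.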
        cur_new_inputs_embeds = []
        cur_new_labels = []
        for i in range(num_replace + 1):
            cur_new_inputs_embeds.append(cur_inputs_embeds_no_im[i])
            cur_new_labels.append(cur_labels_noim[i])
            if i < num_replace:
                # image placeholder: insert all of its patch embeddings
                if replace_type[i] == IMAGE_TOKEN_INDEX:
                    cur_pixel_values = pixel_values[cur_image_idx]
                    cur_image_idx += 1
                    cur_new_inputs_embeds.append(cur_pixel_values)
                    cur_new_labels.append(
                        torch.full((cur_pixel_values.shape[0], ),
                                   IGNORE_INDEX,
                                   device=cur_labels.device,
                                   dtype=cur_labels.dtype))
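                # region placeholder: insert one pooled region feature row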
                elif replace_type[i] == region_id:
                    cur_pixel_values = regions_feats[
                        cur_region_idx:cur_region_idx + 1]
                    cur_region_idx += 1
                    cur_new_inputs_embeds.append(cur_pixel_values)
                    cur_new_labels.append(
                        torch.full((cur_pixel_values.shape[0], ),
                                   IGNORE_INDEX,
                                   device=cur_labels.device,
                                   dtype=cur_labels.dtype))
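                # mark placeholder: insert one mark feature row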
                elif replace_type[i] == mark_id:
                    cur_pixel_values = mark_feats[cur_mark_idx:cur_mark_idx +
                                                  1]
                    cur_mark_idx += 1
                    cur_new_inputs_embeds.append(cur_pixel_values)
                    cur_new_labels.append(
                        torch.full((cur_pixel_values.shape[0], ),
                                   IGNORE_INDEX,
                                   device=cur_labels.device,
                                   dtype=cur_labels.dtype))

        cur_new_inputs_embeds = torch.cat(cur_new_inputs_embeds)
        cur_new_labels = torch.cat(cur_new_labels)
        new_inputs_embeds.append(cur_new_inputs_embeds)
        new_labels.append(cur_new_labels)
    # Pad every sample to the batch max length and rebuild labels,
    # attention mask and position ids to match.
    max_len = max(x.shape[0] for x in new_inputs_embeds)
    batch_size = len(new_inputs_embeds)

    new_inputs_embeds_padded = []
    new_labels_padded = torch.full((batch_size, max_len),
                                   IGNORE_INDEX,
                                   dtype=new_labels[0].dtype,
                                   device=new_labels[0].device)
    attention_mask = torch.zeros((batch_size, max_len),
                                 dtype=attention_mask.dtype,
                                 device=attention_mask.device)
    position_ids = torch.zeros((batch_size, max_len),
                               dtype=position_ids.dtype,
                               device=position_ids.device)
    for i, (cur_new_embed,
            cur_new_labels) in enumerate(zip(new_inputs_embeds, new_labels)):
        cur_len = cur_new_embed.shape[0]
        new_inputs_embeds_padded.append(
            torch.cat(
                (cur_new_embed,
                 torch.zeros((max_len - cur_len, cur_new_embed.shape[1]),
                             dtype=cur_new_embed.dtype,
                             device=cur_new_embed.device)),
                dim=0))
        if cur_len > 0:
            new_labels_padded[i, :cur_len] = cur_new_labels
            attention_mask[i, :cur_len] = True
            position_ids[i, :cur_len] = torch.arange(
                0,
                cur_len,
                dtype=position_ids.dtype,
                device=position_ids.device)
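
    # Stack into a batch tensor, then restore None for any optional input
    # the caller did not provide.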
    new_inputs_embeds = torch.stack(new_inputs_embeds_padded, dim=0)

    if _labels is None:
        new_labels = None
    else:
        new_labels = new_labels_padded

    if _attention_mask is None:
        attention_mask = None
    else:
        attention_mask = attention_mask.to(dtype=_attention_mask.dtype)

    if _position_ids is None:
        position_ids = None

    return {
        'input_ids': None,
        'position_ids': position_ids,
        'attention_mask': attention_mask,
        'past_key_values': past_key_values,
        'inputs_embeds': new_inputs_embeds,
        'labels': new_labels,
    }
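

if __name__ == '__main__':
    # Minimal smoke test of the splicing logic. This block is an
    # illustrative sketch and was not part of the original file:
    # `_DummyLLM`, the hidden size, and the region/mark token ids below
    # are all assumptions chosen to avoid loading a real model.
    class _DummyLLM:

        def __init__(self, vocab_size=128, hidden_size=8):
            self.embed = torch.nn.Embedding(vocab_size, hidden_size)

        def get_input_embeddings(self):
            return self.embed

    REGION_ID, MARK_ID = 120, 121  # hypothetical placeholder token ids
    out = prepare_inputs_labels_for_multimodal_with_visual_prompts(
        llm=_DummyLLM(),
        input_ids=torch.tensor(
            [[1, IMAGE_TOKEN_INDEX, 2, REGION_ID, 3, MARK_ID, 4]]),
        pixel_values=[torch.randn(5, 8)],  # one image -> 5 patch rows
        region_id=REGION_ID,
        regions_feats=torch.randn(1, 8),  # one pooled region feature
        mark_id=MARK_ID,
        mark_feats=torch.randn(1, 8),  # one mark feature
    )
    # 4 text tokens + 5 patches + 1 region + 1 mark = 11 positions
    assert out['inputs_embeds'].shape == (1, 11, 8)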