from transformers import AutoImageProcessor, ViTMAEForPreTraining
from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2ForCausalLM
import torch.nn as nn
import torch
from typing import List, Dict
from PIL import Image

def getVisionProcessor(pretrained_model_name='facebook/vit-mae-base'):
    """Load and return the image processor matching the given ViT-MAE checkpoint."""
    return AutoImageProcessor.from_pretrained(pretrained_model_name)

def getVisionEncoder(pretrained_model_name='facebook/vit-mae-base'):
    """Return only the ViT encoder (`.vit`) of a pretrained ViT-MAE model.

    The MAE decoder head is discarded; callers get the bare encoder.
    """
    pretraining_model = ViTMAEForPreTraining.from_pretrained(pretrained_model_name)
    return pretraining_model.vit

def getTokenizer(pretrained_model_name="Qwen/Qwen2-1.5B-Instruct"):
    """Load the Qwen2 tokenizer and register the image-marker special tokens.

    NOTE(review): the closing marker is literally ``<\\img>`` (a backslash),
    not ``</img>`` — looks like a typo, but it is a runtime token string, so
    it is preserved here; confirm against the data/templates before changing.
    """
    tok = AutoTokenizer.from_pretrained(pretrained_model_name)
    tok.add_special_tokens({'additional_special_tokens': ['<img>', '<\\img>']})
    return tok

def getLLMModel(pretrained_model_name="Qwen/Qwen2-1.5B-Instruct", tokenizer=None):
    """Load a causal LM onto the GPU (auto dtype).

    If a tokenizer is supplied, the input embedding matrix is resized to
    account for any special tokens added to it.
    """
    llm = AutoModelForCausalLM.from_pretrained(
        pretrained_model_name,
        torch_dtype="auto",
        device_map="cuda",
    )
    if tokenizer:
        llm.resize_token_embeddings(len(tokenizer))
    return llm

class MultiModalModel(nn.Module):
    """Minimal vision-language model.

    A frozen ViT-MAE encoder produces image features, a small trainable MLP
    (`vision_projection`) maps them into the embedding space of a frozen
    Qwen2 LLM, and the projected features are spliced into the text token
    embeddings right after the ``<img>`` special token.  Only
    ``vision_projection`` receives gradients.

    NOTE(review): the original file defined ``__init__``,
    ``insert_projected_vision_states`` and ``forward`` TWICE inside this
    class; the second set silently shadowed the first, and the shadowing
    version left ``vision_projection`` on the CPU (device mismatch with the
    CUDA ViT outputs) — the duplicates are merged into this single working
    definition.
    """

    def __init__(self):
        super().__init__()
        self.processor = getVisionProcessor()
        self.vit = getVisionEncoder().to('cuda')
        self.tokenizer = getTokenizer()
        self.llm = getLLMModel(tokenizer=self.tokenizer).to('cuda')

        self.llm_hidden_size = self.llm.config.hidden_size
        self.vit_hidden_size = self.vit.config.hidden_size

        # Two-layer MLP projecting ViT hidden states into the LLM embedding
        # space; must live on the same device as the ViT outputs.
        self.vision_projection = nn.Sequential(
            nn.Linear(self.vit_hidden_size, self.llm_hidden_size * 2),
            nn.GELU(),
            nn.Linear(self.llm_hidden_size * 2, self.llm_hidden_size)
        ).to('cuda')

        # Freeze both backbones; only vision_projection is trainable.
        for param in self.vit.parameters():
            param.requires_grad = False
        for param in self.llm.parameters():
            param.requires_grad = False

    def insert_projected_vision_states(self, text_embeddings, projected_vision_states, img_token_positions, attention_mask):
        """Splice the projected vision states into each text-embedding row.

        Args:
            text_embeddings: (batch_size, seq_len, dim) token embeddings.
            projected_vision_states: (batch_size, proj_seq_len, dim) image
                features already projected to the LLM hidden size.
            img_token_positions: per-row insertion index (the position just
                after the ``<img>`` token).
            attention_mask: (batch_size, seq_len) mask for the text tokens.

        Returns:
            Tuple of the new embeddings, shape
            (batch_size, seq_len + proj_seq_len, dim) cast to bfloat16, and
            the matching int attention mask (vision positions are all 1s).
        """
        batch_size, seq_len, dim = text_embeddings.shape
        proj_seq_len = projected_vision_states.shape[1]
        new_seq_len = seq_len + proj_seq_len

        # Pre-allocate outputs; embeddings are cast to bfloat16 to match the
        # LLM's compute dtype.
        new_text_embeddings = torch.zeros(batch_size, new_seq_len, dim, device=text_embeddings.device).bfloat16()
        new_attention_mask = torch.zeros(batch_size, new_seq_len, device=text_embeddings.device).int()
        projected_vision_mask = torch.ones(batch_size, proj_seq_len, device=text_embeddings.device).int()

        for i, pos in enumerate(img_token_positions):
            # text[:pos] + vision + text[pos:]
            new_text_embeddings[i] = torch.cat((text_embeddings[i, :pos, :],
                                                projected_vision_states[i],
                                                text_embeddings[i, pos:, :]), dim=0)

            new_attention_mask[i] = torch.cat((attention_mask[i, :pos],
                                                projected_vision_mask[i],
                                                attention_mask[i, pos:]), dim=0)

        return new_text_embeddings, new_attention_mask

    def forward(self, messages_list: List[List[Dict]], images_list: List[Image.Image], return_lm_loss=False):
        """Run the multimodal forward pass.

        Args:
            messages_list: one chat-message list per sample, fed to the
                tokenizer's chat template (each prompt should contain the
                ``<img>`` special token).
            images_list: one PIL image per sample.
            return_lm_loss: when True, also build labels and let the LLM
                compute its causal-LM loss.

        Returns:
            (projected_vision_states, img_token_positions, llm_outputs).
        """
        device = 'cuda'

        # Encode the images with the frozen ViT.
        inputs = self.processor(images=images_list, return_tensors="pt").to(device)
        vision_outputs = self.vit(**inputs)
        vision_last_hidden_states = vision_outputs.last_hidden_state
        # Project vision outputs to the LLM hidden size / dtype.
        projected_vision_states = self.vision_projection(vision_last_hidden_states)
        projected_vision_states = projected_vision_states.bfloat16()

        # Render and tokenize the chat prompts.
        text_list = self.tokenizer.apply_chat_template(messages_list, tokenize=False, add_generation_prompt=True)
        tokenized_inputs = self.tokenizer(text_list, return_tensors="pt", padding=True).to(device)

        # Position just AFTER the first <img> token in each row.
        # NOTE(review): argmax returns 0 when <img> is absent, so a prompt
        # without the token silently inserts at position 1 — callers must
        # guarantee the token is present.
        img_token_id = self.tokenizer.convert_tokens_to_ids("<img>")
        img_token_positions = torch.argmax((tokenized_inputs.input_ids == img_token_id).int(), dim=-1) + 1

        # Embed the text and splice in the image features.
        text_embeddings = self.llm.get_input_embeddings()(tokenized_inputs.input_ids)
        text_embeddings, attention_mask = self.insert_projected_vision_states(
            text_embeddings,
            projected_vision_states,
            img_token_positions,
            tokenized_inputs.attention_mask
        )

        if return_lm_loss:
            labels = tokenized_inputs.input_ids.clone()
            # Fill with -100, the value HuggingFace causal-LM loss ignores,
            # so the inserted vision span is excluded from the loss.
            # (The original used 151643 — the Qwen2 pad/eos token id — which
            # WAS scored by the loss: the model was being trained to predict
            # the pad token over the image region.)
            new_labels = torch.full(
                (labels.shape[0], text_embeddings.shape[1]),
                fill_value=-100,
                dtype=labels.dtype,
                device=device,
            )

            # Copy the original token labels around the inserted vision span.
            for i, pos in enumerate(img_token_positions):
                new_labels[i, :pos] = labels[i, :pos]  # before <img>
                new_labels[i, pos + projected_vision_states.shape[1]:] = labels[i, pos:]  # after <img>

            outputs = self.llm(
                inputs_embeds=text_embeddings,
                attention_mask=attention_mask,
                labels=new_labels,
                output_hidden_states=True
            )
            return projected_vision_states, img_token_positions, outputs
        else:
            outputs = self.llm(
                inputs_embeds=text_embeddings,
                attention_mask=attention_mask,
                output_hidden_states=True
            )
            return projected_vision_states, img_token_positions, outputs
