import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, AutoTokenizer, AutoModel, AutoProcessor, PreTrainedModel, SiglipModel, SiglipProcessor
from peft import LoraConfig, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict
from transformers.modeling_outputs import CausalLMOutputWithPast
from configs.config import ModelConfig, TrainConfig, PathConfig


class GeoCLIP(PreTrainedModel):
    """Vision-language model that splices projected image features into an LLM.

    A vision encoder produces patch embeddings, which are compressed 9x along
    the sequence axis (by folding 9 neighbouring patches into the channel
    dimension), projected into the LLM's embedding space, and written into the
    token-embedding sequence at every ``<|image_pad|>`` placeholder position.
    The LLM is fine-tuned with LoRA adapters; the vision tower is optionally
    frozen.
    """

    config_class = ModelConfig

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # Vision tower and its matching preprocessing pipeline.
        self.vision_model = AutoModel.from_pretrained(self.config.vision_model)
        self.processor = AutoProcessor.from_pretrained(self.config.vision_model)
        self.llm_model = AutoModelForCausalLM.from_pretrained(self.config.llm_model, trust_remote_code=True)
        lora_config = LoraConfig(
            r=64,  # LoRA rank
            lora_alpha=128,
            target_modules=["q_proj", "k_proj", "w2"],  # attention/MLP modules targeted for Qwen
            lora_dropout=0.1,
            bias="lora_only",
            task_type="CAUSAL_LM"
        )
        self.llm_model = get_peft_model(self.llm_model, lora_config)
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.config.llm_model,
            trust_remote_code=True
        )
        # Resolve the image-placeholder token id ONCE. The previous code
        # re-tokenized '<|image_pad|>' on every forward pass and took
        # ['input_ids'][0], which silently picks a BOS/special token instead
        # of the placeholder if the tokenizer prepends one.
        self.image_pad_token_id = self.tokenizer.convert_tokens_to_ids('<|image_pad|>')
        # Projector: 9 adjacent vision tokens are concatenated channel-wise
        # (hidden_size * 9) and mapped into the LLM hidden size.
        self.proj = nn.Sequential(
            nn.Linear(self.vision_model.config.vision_config.hidden_size * 9,
                      self.llm_model.config.hidden_size),
            nn.LayerNorm(self.llm_model.config.hidden_size),
            nn.GELU(),
            nn.Linear(self.llm_model.config.hidden_size, self.llm_model.config.hidden_size)
        ).to(TrainConfig.device)
        if self.config.freeze_vision_model:
            for param in self.vision_model.parameters():
                param.requires_grad = False

    def forward(self, input_ids, labels=None, pixel_values=None, attention_mask=None):
        """Run one multimodal forward pass and (optionally) compute the loss.

        Args:
            input_ids: (batch, seq) token ids containing ``<|image_pad|>``
                placeholders where image features should be inserted.
            labels: optional (batch, seq) target ids; when ``None`` (e.g. at
                inference time) no loss is computed.
            pixel_values: preprocessed image tensor for the vision tower.
            attention_mask: optional (batch, seq) mask forwarded to the LLM.

        Returns:
            CausalLMOutputWithPast with ``logits`` and, if labels were given,
            ``loss``.
        """
        text_embeds = self.llm_model.get_input_embeddings()(input_ids)

        # Patch embeddings from the vision tower: (b, s, d).
        image_embeds = self.vision_model.vision_model(pixel_values).last_hidden_state
        b, s, d = image_embeds.shape
        # Compress image tokens 9x by folding 9 neighbouring patches into the
        # channel dim: (b, 729, d) -> (b, 81, d*9). Requires s % 9 == 0.
        image_embeds = image_embeds.view(b, -1, d * 9)
        image_features = self.proj(image_embeds)

        # Match dtypes so the in-place merge below is legal (e.g. fp16 vision
        # features into fp32 text embeddings).
        text_embeds = text_embeds.to(image_features.dtype)

        inputs_embeds = self.merge_input_ids_with_image_features(image_features, text_embeds, input_ids)
        outputs = self.llm_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask)
        logits = outputs[0]
        loss = None
        if labels is not None:
            # Pad positions are excluded from the loss. Fall back to the
            # conventional -100 ignore index when the tokenizer defines no
            # pad token (pad_token_id would be None and crash the criterion).
            pad_id = self.tokenizer.pad_token_id
            loss_fct = nn.CrossEntropyLoss(ignore_index=pad_id if pad_id is not None else -100)
            # NOTE(review): logits[t] is compared against labels[t] with no
            # causal shift -- this assumes the dataset/collator pre-shifts
            # labels by one position. Confirm against the data pipeline.
            loss = loss_fct(
                logits.view(-1, logits.size(-1)), labels.view(-1).to(logits.device)
            )
        return CausalLMOutputWithPast(loss=loss, logits=logits)

    def merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids):
        """Write image features into ``inputs_embeds`` (in place) at every
        ``<|image_pad|>`` position of ``input_ids``.

        The total number of placeholder positions across the batch must equal
        ``num_images * num_image_patches``; otherwise the assignment below
        raises a shape-mismatch error.
        """
        num_images, num_image_patches, embed_dim = image_features.shape
        batch_indices, image_indices = torch.where(input_ids == self.image_pad_token_id)

        inputs_embeds[batch_indices, image_indices] = image_features.view(-1, embed_dim)

        return inputs_embeds
