import os
os.environ["FLAGS_use_cuda_managed_memory"] = "true"
import copy
from typing import List, Tuple, Union, Dict, Optional
import re
import requests
import paddle
import paddle.nn as nn
from PIL import Image
from paddle.vision import transforms
from paddlenlp.transformers import ChatGLMForConditionalGeneration
from paddlenlp.transformers import Blip2VisionModel
from paddlenlp.transformers import Blip2QFormerModel
from paddlenlp.transformers import BlipImageProcessor
from paddlenlp.transformers import ChatGLMTokenizer
from paddlenlp.transformers import Blip2Config
from paddlenlp.transformers import PretrainedConfig
from paddlenlp.transformers import ChatGLMConfig
from paddlenlp.transformers import PretrainedModel
from paddlenlp.transformers.generation_utils import (LogitsProcessorList, LogitsProcessor)
from paddlenlp.transformers import ChatGLMTokenizer
from paddlenlp.transformers import Blip2VisionConfig
from paddlenlp.transformers import Blip2QFormerConfig
from paddlenlp.transformers import Blip2ForConditionalGeneration
from paddlenlp.transformers.blip_2.modeling import Parameter
from loguru import logger


class InvalidScoreLogitsProcessor(LogitsProcessor):
    """Sanitize NaN-corrupted logits during generation.

    If any score is NaN, the whole score tensor is zeroed in place and
    token id 5 is given a large logit, so decoding can continue with a
    deterministic fallback token instead of propagating NaNs.
    """

    def __call__(self, input_ids: paddle.int64, scores: paddle.Tensor) -> paddle.Tensor:
        has_invalid = paddle.isnan(scores).any()
        if has_invalid:
            scores.zero_()
            scores[..., 5] = 5e4
        return scores

        
class BlipImageEvalProcessor(BlipImageProcessor):
    """Eval-time image preprocessor: resize -> to-tensor -> normalize.

    Falls back to the CLIP normalization statistics when ``mean``/``std``
    are not provided.
    """

    def __init__(self, image_size=384, mean=None, std=None):
        # Fix: resolve the default statistics BEFORE calling the parent
        # constructor. The original passed the raw (possibly None) values
        # to super().__init__, so the base processor was configured with
        # None while the transform below used the resolved defaults.
        if mean is None:
            mean = (0.48145466, 0.4578275, 0.40821073)
        if std is None:
            std = (0.26862954, 0.26130258, 0.27577711)
        super().__init__(mean=mean, std=std)
        self.transform = transforms.Compose(
            [
                transforms.Resize(
                    (image_size, image_size),
                ),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ]
        )

    def __call__(self, item):
        """Apply the full transform pipeline to a PIL image; returns a tensor."""
        return self.transform(item)
   

class VisualGLMConfig(PretrainedConfig):
    """Composite configuration for VisualGLM.

    Bundles three sub-configurations: a BLIP-2 vision encoder config, a
    Q-Former config, and a ChatGLM text config, plus the number of learnable
    query tokens used to bridge vision and language.
    """

    model_type = "visualglm"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
            vision_config = {}

        if qformer_config is None:
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
            qformer_config = {}

        if text_config is None:
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
            text_config = {}

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        self.text_config = ChatGLMConfig(**text_config)

        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over vision features, so its encoder
        # width must track the vision model's hidden size.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: ChatGLMConfig,
        **kwargs,
    ):
        r"""
        Instantiate a [`Blip2Config`] (or a derived class) from a BLIP-2 vision model, Q-Former and language model
        configurations.
        Returns:
            [`Blip2Config`]: An instance of a configuration object
        """
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        output = copy.deepcopy(self.__dict__)
        # Sub-configs are stored as objects; serialize each one recursively.
        for key in ("vision_config", "qformer_config", "text_config"):
            output[key] = getattr(self, key).to_dict()
        output["model_type"] = self.__class__.model_type
        return output

    
class VisualGLM(PretrainedModel):
    """BLIP-2-style vision-language model pairing a ViT vision encoder and
    Q-Former with a ChatGLM language model.

    An image is encoded into ``config.num_query_tokens`` query vectors by the
    Q-Former, projected into the ChatGLM embedding space, and spliced into
    the token-embedding sequence in place of ``image_length`` placeholder
    (unk) tokens inside the ``<img></img>`` span of the prompt.
    """

    config_class = VisualGLMConfig
    # main_input_name = "pixel_values"

    def __init__(self, config: VisualGLMConfig):
        super(VisualGLM, self).__init__(config)
        self.vision_model = Blip2VisionModel(config.vision_config)
        # Learnable Q-Former query tokens, shape [1, num_query_tokens, qformer_hidden].
        self.query_tokens = Parameter(paddle.zeros([1, config.num_query_tokens, config.qformer_config.hidden_size], dtype="float16"))
        self.qformer = Blip2QFormerModel(config.qformer_config)
        # Projects Q-Former outputs to the ChatGLM hidden size.
        self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
        self.language_model = ChatGLMForConditionalGeneration(config.text_config)
        # Number of placeholder tokens the image occupies in the prompt.
        self.image_length = config.num_query_tokens

    def prepare_inputs_for_generation(self, input_ids, position_ids=None, attention_mask=None, past_key_values=None, cache=None, **kwargs):
        """Build per-step inputs for ``generate``.

        First step (no cache): returns the full sequence plus ChatGLM-style
        masks/position ids. Later steps: returns only the newest token with
        the trailing slice of mask/position ids. ``images`` and
        ``pre_image_length`` are threaded through unchanged so ``forward``
        can splice image embeddings into the prompt.
        """
        images = kwargs['images']
        pre_image_length = kwargs['pre_image_length']
        batch_size, seq_length = input_ids.shape
        # ChatGLM anchors position ids on its [MASK]/[gMASK] tokens.
        MASK, gMASK = self.config.text_config.mask_token_id, self.config.text_config.gmask_token_id
        use_gmasks = []
        mask_positions = []
        for seq in input_ids:
            mask_token = gMASK if gMASK in seq else MASK
            use_gmask = mask_token == gMASK
            use_gmasks.append(use_gmask)
            mask_positions.append(paddle.where(seq == mask_token)[0][0])

        if cache is not None or past_key_values is not None:
            # Incremental decoding: only the newest token is fed forward.
            last_token = input_ids[:, -1].unsqueeze(-1)
            if attention_mask is not None and attention_mask.dtype == paddle.int64:
                attention_mask = attention_mask[:, :, -1:]
            else:
                attention_mask = self.language_model.get_masks(input_ids)[:, :, -1:]
            if position_ids is not None:
                position_ids = position_ids[..., -1:]
            else:
                if self.language_model.position_encoding_2d:
                    # 2D positions: (mask position, offset past the BOS context).
                    context_lengths = []
                    for seq in input_ids:
                        context_lengths.append(paddle.where(seq == self.config.text_config.bos_token_id)[0][0])
                    context_lengths = paddle.to_tensor(context_lengths, dtype="int64")
                    block_position_ids = seq_length - context_lengths
                    position_ids = paddle.concat(
                        [paddle.to_tensor(mask_positions, dtype="int64"), block_position_ids], axis=1
                    ).unsqueeze(-1)
                else:
                    position_ids = paddle.to_tensor(mask_positions, dtype="int64").unsqueeze(-1)

            if cache is None:
                cache = past_key_values
            return {
                "input_ids": last_token,
                # NOTE(review): only cache[-1] is forwarded — looks like the
                # cache arrives wrapped one level deep; confirm against
                # ChatGLMForConditionalGeneration's expected cache layout.
                "cache": cache[-1],
                "position_ids": position_ids,
                "use_cache": True,
                "attention_mask": attention_mask,
                "images": images,
                "pre_image_length": pre_image_length,
            }
        else:
            if attention_mask is not None and attention_mask.dtype != paddle.int64:
                logger.warning(f"The dtype of attention mask ({attention_mask.dtype}) is not int64")
                attention_mask = None
            if attention_mask is None:
                attention_mask = self.language_model.get_masks(input_ids)
            if position_ids is None:
                position_ids = self.language_model.get_position_ids(input_ids, mask_positions=mask_positions, use_gmasks=use_gmasks)

            return {
                "input_ids": input_ids,
                "cache": cache,
                "position_ids": position_ids,
                "use_cache": True,
                "attention_mask": attention_mask,
                "images": images,
                "pre_image_length": pre_image_length,
            }

    @staticmethod
    def process_image(text, image=None):
        '''Extract and preprocess the image referenced in the prompt text.

        Args:
            text: str, prompt possibly containing an ``<img>PATH</img>`` span.
            image: Optional, image path / url / PIL image. Mutually exclusive
                with an embedded path in ``text``.
        Returns:
            (text, image_position, image): the text with the path stripped
            from the ``<img>`` span, the character index just after ``<img>``,
            and the preprocessed image tensor (or None).
        '''
        from PIL import Image
        from io import BytesIO

        image_position = text.rfind("<img>") + 5
        # extract path from <img></img> using re
        image_path = re.findall(r"<img>(.*?)</img>", text)
        image_path = image_path[-1] if image_path else None
        if image_path is not None:
            assert image is None, "image and image_path cannot be both not None."
            text = text.replace(f"<img>{image_path}</img>", "<img></img>")
            # url
            if image_path.startswith("http"):
                response = requests.get(image_path, timeout=10)
                image = Image.open(BytesIO(response.content))
            # local path
            else:
                image = Image.open(image_path)
        if image is not None:
            processor = BlipImageEvalProcessor(224)
            image = processor(image.convert('RGB'))
            image = image.unsqueeze(0)  # add batch dimension
            image = paddle.cast(image, dtype=paddle.get_default_dtype())
        return text, image_position, image

    def build_inputs_with_image(self, tokenizer: ChatGLMTokenizer, image_path: str, query: str, history: List[Tuple[str, str]] = None):
        """Tokenize a chat prompt, reserving ``image_length`` unk-token slots
        for the image embeddings right after the ``<img>`` tag.

        Returns a dict with "input_ids", "pre_image_length" (token count
        before the image slots) and "images" (tensor or absent when no image).
        """
        image_path = image_path.strip()
        if image_path:
            prompt = "<img>{}</img>".format(image_path)
        else:
            prompt = ""
        for i, (old_query, response) in enumerate(history):  # history removes image urls/paths, while query does not.
            prompt += "问：{}\n答：{}\n".format(old_query, response)
        prompt += "问：{}\n答：".format(query)
        prompt, image_position, paddle_image = self.process_image(prompt)
        if paddle_image is not None:
            # Split the prompt around the image position and reserve
            # image_length placeholder tokens for the visual embeddings.
            input0 = tokenizer(prompt[:image_position], add_special_tokens=False)['input_ids']
            input1 = [tokenizer.unk_token_id] * self.image_length
            input2 = tokenizer(prompt[image_position:], add_special_tokens=False)['input_ids']
            inputs = sum([input0, input1, input2], [])
            inputs = {
                "input_ids": paddle.to_tensor([tokenizer.build_inputs_with_special_tokens(inputs)], dtype=paddle.int64),
                "pre_image_length": len(input0),
                "images": paddle_image}
        else:
            # Fix: request Paddle tensors ("pd"); the original passed the
            # PyTorch flag "pt", which is wrong in a Paddle codebase.
            inputs = tokenizer([prompt], return_tensors="pd")
            inputs["pre_image_length"] = 0
        return inputs

    def mark_only_language_projection_as_trainable(self) -> None:
        """Freeze vision, language and Q-Former weights; train only the
        language_projection layer."""
        # freeze pretrained model
        for _, weight in self.vision_model.state_dict().items():
            weight.stop_gradient = True
        for _, weight in self.language_model.state_dict().items():
            weight.stop_gradient = True
        for _, weight in self.qformer.state_dict().items():
            weight.stop_gradient = True
        # train language_projection only
        for _, weight in self.language_projection.state_dict().items():
            weight.stop_gradient = False

    def print_trainable_parameters(self) -> None:
        """Log frozen/trainable/total parameter counts across all submodules."""
        freeze_numel = 0
        trainable_numel = 0
        for _, weight in self.vision_model.state_dict().items():
            if weight.stop_gradient:
                freeze_numel += weight.numel().item()
            else:
                trainable_numel += weight.numel().item()

        for _, weight in self.language_model.state_dict().items():
            if weight.stop_gradient:
                freeze_numel += weight.numel().item()
            else:
                trainable_numel += weight.numel().item()

        for _, weight in self.qformer.state_dict().items():
            if weight.stop_gradient:
                freeze_numel += weight.numel().item()
            else:
                trainable_numel += weight.numel().item()

        for _, weight in self.language_projection.state_dict().items():
            if weight.stop_gradient:
                freeze_numel += weight.numel().item()
            else:
                trainable_numel += weight.numel().item()
        logger.info(
            f"Frozen parameters: {freeze_numel:.2e} || Trainable parameters:{trainable_numel:.2e} || Total parameters:{freeze_numel+trainable_numel:.2e}|| Trainable:{trainable_numel / (freeze_numel+trainable_numel):.2%}"
        )

    @paddle.no_grad()
    def chat(self, tokenizer: ChatGLMTokenizer, image_path: str, query: str, history: List[Tuple[str, str]] = None, max_length: int = 1024,
             min_length=100, do_sample=True, top_p=0.4, top_k=5, temperature=0.8, repetition_penalty=1.2, logits_processor=None, **kwargs):
        """Run one chat round: build image+text inputs, generate, and decode.

        Returns a list of decoded response strings (one per generated
        sequence).
        """
        if history is None:
            history = []
        if logits_processor is None:
            logits_processor = LogitsProcessorList()
        logits_processor.append(InvalidScoreLogitsProcessor())
        # NOTE(review): decode_strategy is "greedy_search" while
        # do_sample/top_p/top_k/temperature are also passed — confirm which
        # takes effect in paddlenlp's generate().
        gen_kwargs = {"max_length": max_length, "min_length": min_length, "do_sample": do_sample, "top_p": top_p,
                      "top_k": top_k, "temperature": temperature, "repetition_penalty": repetition_penalty, "decode_strategy": "greedy_search",
                      "logits_processor": logits_processor, **kwargs}
        inputs = self.build_inputs_with_image(tokenizer, image_path, query, history=history)    # return: "input_ids", "pre_image_length", "images"
        outputs = self.generate(**inputs, **gen_kwargs)
        outputs = outputs[0]  # first element holds the generated token ids
        result = []
        for x in outputs.tolist():
            res = tokenizer.decode(x, skip_special_tokens=True)
            result.append(res)
        response = result
        return response

    def image_encoder(self, pixel_values: paddle.Tensor):
        """Encode pixel values into ``num_query_tokens`` visual feature
        vectors via the vision model and Q-Former."""
        # step 1: forward the images through the vision encoder,
        # to get image embeddings of shape (batch_size, seq_len, hidden_size)
        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
        )
        image_embeds = vision_outputs[0]
        # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
        image_attention_mask = paddle.ones(image_embeds.shape[:-1], dtype="int64")
        query_tokens = self.query_tokens.expand([image_embeds.shape[0], -1, -1])    # [B, N, D]
        query_outputs = self.qformer(
            query_embeds=query_tokens,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_attention_mask,
        )
        query_output = query_outputs[0]
        return query_output

    def forward(
            self,
            input_ids: Optional[paddle.Tensor] = None,
            position_ids: Optional[paddle.Tensor] = None,
            attention_mask: Optional[paddle.Tensor] = None,
            images: Optional[paddle.Tensor] = None,
            pre_image_length: Optional[int] = None,
            past_key_values: Optional[Tuple[paddle.Tensor]] = None,
            inputs_embeds: Optional[paddle.Tensor] = None,
            labels: Optional[paddle.Tensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            cache=None,
    ):
        """Forward pass.

        On the first (uncached) step with images, the input ids are split
        around the reserved image slots and the projected image embeddings
        are concatenated between the pre/post text embeddings; the merged
        ``inputs_embeds`` then drive the language model.
        """
        if inputs_embeds is None and cache is None and images is not None:
            image_embeds = self.image_encoder(images)
            image_embeds = self.language_projection(image_embeds)
            pre_id, pads, post_id = paddle.split(input_ids,
                                                       [pre_image_length, self.image_length, -1],
                                                       axis=1)  # image after [Round 0]\n问：<img>
            pre_txt_emb = self.language_model.get_input_embeddings()(pre_id)
            post_txt_emb = self.language_model.get_input_embeddings()(post_id)
            inputs_embeds = paddle.concat([pre_txt_emb, image_embeds, post_txt_emb], axis=1)
            input_ids = None  # embeddings fully replace the ids
        ret = self.language_model.forward(
            input_ids=input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            cache=cache,
            return_dict=return_dict
        )
        if len(ret) == 2:
            # Cast the first output (logits/loss from a float16 LM) up to
            # float32 for numerically stable downstream use.
            ret = (paddle.cast(ret[0], dtype='float32'), ret[1])
        return ret
    
    

def construct_config():
    """Build a VisualGLM config from the pretrained BLIP-2 and ChatGLM
    configs and save it under ./visualglm next to this file."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    chatglm_cfg: ChatGLMConfig = ChatGLMConfig.from_pretrained("THUDM/chatglm-6b")
    blip2cfg: Blip2Config = Blip2Config.from_pretrained("Salesforce/blip2-flan-t5-xl")
    merged = VisualGLMConfig.from_vision_qformer_text_configs(
        blip2cfg.vision_config, blip2cfg.qformer_config, chatglm_cfg
    )
    merged.save_pretrained(os.path.join(base_dir, './visualglm'))
    

def construct_model():
    """Assemble a VisualGLM from pretrained BLIP-2 (vision + Q-Former) and
    ChatGLM weights, then save the combined model to ./visualglm."""
    paddle.set_default_dtype('float16')
    base_dir = os.path.dirname(os.path.abspath(__file__))
    cfg = VisualGLMConfig.from_json_file(os.path.join(base_dir, './visualglm/config.json'))
    model: VisualGLM = VisualGLM(cfg)
    blip2model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xl", dtype='float16')
    chatgml_model = ChatGLMForConditionalGeneration.from_pretrained("THUDM/chatglm-6b", dtype='float16')
    # Transplant the pretrained submodules into the freshly built skeleton.
    model.vision_model = blip2model.vision_model
    model.query_tokens = blip2model.query_tokens
    model.qformer = blip2model.qformer
    model.language_model = chatgml_model
    model.save_pretrained(os.path.join(base_dir, './visualglm'))
    
    
def main():
    """Run a single image-description chat round with VisualGLM."""
    paddle.set_default_dtype('float32')
    base_dir = os.path.dirname(os.path.abspath(__file__))
    img_path = os.path.join(base_dir, './fewshot-data/2p.png')
    cfg = VisualGLMConfig.from_json_file(os.path.join(base_dir, './visualglm/config.json'))
    # NOTE(review): the model is built from config only — no checkpoint
    # weights are loaded here (see the commented from_pretrained line).
    model = VisualGLM(cfg)
    # model = VisualGLM.from_pretrained(os.path.join(base_dir, "./checkpoints/visualglm"), dtype='float16')
    model.eval()
    tokenizer = ChatGLMTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
    response = model.chat(tokenizer, img_path, '描述这张图片。')
    logger.info(f"response: {response}")
    
   
if __name__ == "__main__":
    #main()
    # construct_config()
    # main()
    construct_model()