# coding=utf-8
# Copyright 2024 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Optional, Union, List, Dict, Any

import torch
from torch import nn

try:
    # Preferred path: use the real Qwen2VL implementation shipped with transformers.
    from transformers.models.qwen2_vl.modeling_qwen2_vl import (
        Qwen2VLForConditionalGeneration,
        Qwen2VLModel,
        Qwen2VLConfig,
        Qwen2VLCausalLMOutputWithPast,
        Qwen2VLModelOutputWithPast,
    )
except ImportError:
    # If Qwen2VL is unavailable (older transformers release), fall back to
    # generic base classes so this module can still be imported.
    from transformers import (
        PreTrainedModel,
        PretrainedConfig,
        AutoModelForCausalLM,
        AutoConfig,
    )
    
    # Minimal stand-in config carrying only the model_type tag.
    class Qwen2VLConfig(PretrainedConfig):
        model_type = "qwen2_vl"
        
    # Placeholder model classes; they only bind the stub config class and
    # provide no actual modeling logic.
    class Qwen2VLForConditionalGeneration(PreTrainedModel):
        config_class = Qwen2VLConfig
        
    class Qwen2VLModel(PreTrainedModel):
        config_class = Qwen2VLConfig
        
    # Empty output stand-ins so subclass definitions below do not fail.
    class Qwen2VLCausalLMOutputWithPast:
        pass
        
    class Qwen2VLModelOutputWithPast:
        pass

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from transformers.models.auto import CONFIG_MAPPING, AutoConfig


logger = logging.get_logger(__name__)


class Qwen25VLConfig(Qwen2VLConfig):
    r"""
    Configuration class for a [`Qwen25VLForConditionalGeneration`] model.

    Instantiating a configuration with these arguments defines the Qwen2.5-VL
    model architecture. Inherits from [`Qwen2VLConfig`]; see the
    [`PretrainedConfig`] documentation for the common options.

    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*):
            Config object or dictionary for the vision backbone; handled by
            the parent class.
        text_config (`Union[AutoConfig, dict]`, *optional*):
            Config object or dictionary for the text backbone; handled by
            the parent class.
        image_token_index (`int`, *optional*, defaults to 151655):
            Token index used to encode an image placeholder in the prompt.
        video_token_index (`int`, *optional*, defaults to 151656):
            Token index used to encode a video placeholder in the prompt.
        vision_start_token_index (`int`, *optional*, defaults to 151652):
            Token index marking the start of a vision segment.
        vision_end_token_index (`int`, *optional*, defaults to 151653):
            Token index marking the end of a vision segment.
        image_start_token_index (`int`, *optional*, defaults to 151649):
            Token index marking the start of an image segment.
        image_end_token_index (`int`, *optional*, defaults to 151650):
            Token index marking the end of an image segment.
        video_start_token_index (`int`, *optional*, defaults to 151647):
            Token index marking the start of a video segment.
        video_end_token_index (`int`, *optional*, defaults to 151648):
            Token index marking the end of a video segment.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            Spatial merge size applied to vision features.
        temporal_patch_size (`int`, *optional*, defaults to 2):
            Temporal patch size applied to video features.
        merge_size (`int`, *optional*, defaults to 2):
            Merge size applied to vision features.
    """

    model_type = "qwen2_5_vl"

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        image_token_index=151655,
        video_token_index=151656,
        vision_start_token_index=151652,
        vision_end_token_index=151653,
        image_start_token_index=151649,
        image_end_token_index=151650,
        video_start_token_index=151647,
        video_end_token_index=151648,
        spatial_merge_size=2,
        temporal_patch_size=2,
        merge_size=2,
        **kwargs,
    ):
        # Record the special-token indices and vision-merge geometry first,
        # before delegating the remaining options to the parent constructor
        # (matching the original initialization order).
        for attr_name, attr_value in (
            ("image_token_index", image_token_index),
            ("video_token_index", video_token_index),
            ("vision_start_token_index", vision_start_token_index),
            ("vision_end_token_index", vision_end_token_index),
            ("image_start_token_index", image_start_token_index),
            ("image_end_token_index", image_end_token_index),
            ("video_start_token_index", video_start_token_index),
            ("video_end_token_index", video_end_token_index),
            ("spatial_merge_size", spatial_merge_size),
            ("temporal_patch_size", temporal_patch_size),
            ("merge_size", merge_size),
        ):
            setattr(self, attr_name, attr_value)

        # Let the parent class wire up the vision/text sub-configs and any
        # remaining keyword arguments.
        super().__init__(
            vision_config=vision_config,
            text_config=text_config,
            **kwargs,
        )


class Qwen25VLModelOutputWithPast(Qwen2VLModelOutputWithPast):
    r"""
    Output type for the Qwen2.5-VL base model; may carry past key/values to
    speed up sequential decoding. Identical in structure to the parent class.
    """


class Qwen25VLCausalLMOutputWithPast(Qwen2VLCausalLMOutputWithPast):
    r"""
    Output type for the Qwen2.5-VL causal (autoregressive) language model.
    Identical in structure to the parent class.
    """


class Qwen25VLModel(Qwen2VLModel):
    """
    Qwen2.5-VL base model.

    Thin subclass of `Qwen2VLModel` that binds the Qwen2.5-VL configuration
    class; the forward pass is delegated unchanged to the parent
    implementation.
    """

    config_class = Qwen25VLConfig

    def __init__(self, config: Qwen25VLConfig, **kwargs):
        super().__init__(config, **kwargs)

    def forward(
        self,
        # Fix: input_ids defaults to None, so it must be Optional (the other
        # parameters already were).
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_grid_thw: Optional[torch.LongTensor] = None,
        video_grid_thw: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ):
        """
        Run the model forward pass.

        All arguments are forwarded verbatim to `Qwen2VLModel.forward`; this
        subclass adds no processing of its own. See the parent class for
        argument semantics and the returned output type.
        """
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )


class Qwen25VLForConditionalGeneration(Qwen2VLForConditionalGeneration):
    """
    Qwen2.5-VL model for conditional generation.

    Thin subclass of `Qwen2VLForConditionalGeneration` that binds the
    Qwen2.5-VL configuration class; usable for both image and video
    understanding tasks. Forward and generation logic are delegated
    unchanged to the parent implementation.
    """

    config_class = Qwen25VLConfig

    def __init__(self, config: Qwen25VLConfig):
        super().__init__(config)

    def forward(
        self,
        # Fix: input_ids defaults to None, so it must be Optional (the other
        # parameters already were).
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_grid_thw: Optional[torch.LongTensor] = None,
        video_grid_thw: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ):
        """
        Run the forward pass (optionally computing a language-modeling loss
        when `labels` is given).

        All arguments are forwarded verbatim to
        `Qwen2VLForConditionalGeneration.forward`; this subclass adds no
        processing of its own. See the parent class for argument semantics
        and the returned output type.
        """
        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            **kwargs,
        )

    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        image_grid_thw: Optional[torch.LongTensor] = None,
        video_grid_thw: Optional[torch.LongTensor] = None,
        **kwargs,
    ):
        """
        Generate token sequences with multimodal (image/video) inputs.

        The multimodal tensors are forwarded verbatim to the parent
        `generate`; all standard generation options go through `**kwargs`.
        """
        return super().generate(
            inputs=inputs,
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
            video_grid_thw=video_grid_thw,
            **kwargs,
        )


# Register the Qwen2.5-VL config with the Auto* machinery so that
# AutoConfig.from_pretrained can resolve the "qwen2_5_vl" model type.
try:
    CONFIG_MAPPING.register("qwen2_5_vl", Qwen25VLConfig)
    logger.info("Qwen2.5-VL配置注册成功")  # log: "Qwen2.5-VL config registered successfully"
except Exception as e:
    # Registration may fail, e.g. if the model type is already registered by
    # a newer transformers release; log and continue so importing this
    # module never blocks model loading.
    logger.warning(f"配置注册失败: {e}")  # log: "config registration failed: ..."


# Explicit public API of this module.
__all__ = [
    "Qwen25VLConfig",
    "Qwen25VLForConditionalGeneration",
    "Qwen25VLModel",
    "Qwen25VLModelOutputWithPast",
    "Qwen25VLCausalLMOutputWithPast",
]