# Copyright (c) Alibaba, Inc. and its affiliates.
from dataclasses import dataclass, field
from typing import Any, Dict, List, Literal, Optional

import torch
import transformers
from packaging import version

from swift.utils import get_env_args
from ..base import Template
from ..constant import MLLMTemplateType
from ..register import TemplateMeta, register_template
from ..template_inputs import StdTemplateInputs
from ..utils import Context, Prompt, findall
from ..vision_utils import load_video_llava
from .llama import Llama3TemplateMeta
from .qwen import QwenTemplateMeta
from .utils import ChatmlTemplateMeta


from swift.llm.model.model.vlm_fo1.model.multimodal_encoder.davit.image_processing_clip import CLIPImageProcessor

from transformers.models.qwen2_vl.image_processing_qwen2_vl import Qwen2VLImageProcessor


class VisionFeaturesGather:
    """Accumulator for intermediate vision-encoder features.

    Used to build multi-level visual representations (e.g. for region
    features / ROIAlign). A monkey-patched encoder forward appends one
    feature tensor per gathered layer; `extract_multi_level_features`
    then undoes the window shuffling and splits the result per image.
    """

    def __init__(self) -> None:
        # One entry per gathered encoder layer, each of shape [seq_len, hidden].
        self.features_list = []
        # Per-image (t, h, w) patch grid, shape [num_images, 3].
        self.grid_thw = None
        # Window permutation applied by the encoder; inverted during extraction.
        self.window_index = None
        # Spatial merge factor of the vision encoder.
        self.merge_size = None

    def reset(self):
        """Drop all accumulated state before a new gathering pass."""
        self.features_list.clear()
        self.grid_thw = None
        self.window_index = None
        self.merge_size = None

    def set_params(self, grid_thw, window_index, merge_size):
        """Record the spatial layout for the current image batch."""
        self.grid_thw = grid_thw
        self.window_index = window_index
        self.merge_size = merge_size

    def append(self, element):
        """Add one layer's feature tensor to the collection."""
        self.features_list.append(element)

    def extract_multi_level_features(self):
        """Assemble the gathered features into per-image, per-level maps.

        Returns:
            A list with one entry per image; each entry is a list of
            tensors [1, hidden/k, grid_h, grid_w], one per gathered level.
        """
        # Stack all levels along the hidden dimension: [seq_len, hidden * k].
        stacked = torch.cat(self.features_list, dim=1)
        unit = self.merge_size * self.merge_size
        total_tokens = stacked.shape[0]

        # Undo the encoder's window permutation: group tokens into windows,
        # reorder windows back to spatial order, then flatten again.
        windows = stacked.reshape(total_tokens // unit, unit, -1)
        undo_order = torch.argsort(self.window_index)
        restored = windows[undo_order, :, :].reshape(total_tokens, -1)

        # Split the flat sequence per image by its h*w token count.
        per_image_len = (self.grid_thw[:, 1] * self.grid_thw[:, 2]).tolist()
        chunks = list(torch.split(restored, per_image_len, dim=0))
        assert len(chunks) == self.grid_thw.shape[0]

        num_levels = len(self.features_list)
        results = []
        for thw, chunk in zip(self.grid_thw, chunks):
            _, grid_h, grid_w = thw
            merged_h = grid_h // self.merge_size
            merged_w = grid_w // self.merge_size
            # Recover the 2D grid from the merged-window token order.
            grid = chunk.reshape(merged_h, merged_w, unit, -1)
            grid = grid.reshape(merged_h, merged_w, self.merge_size, self.merge_size, -1)
            grid = grid.permute(0, 2, 1, 3, 4)
            grid = grid.flatten(start_dim=0, end_dim=-2)
            # Slice the hidden dimension into k equal level maps [1, d/k, h, w].
            hidden = grid.shape[-1]
            level_dim = hidden // num_levels
            grid = grid.reshape(grid_h, grid_w, -1)
            results.append([
                grid[..., lvl * level_dim:(lvl + 1) * level_dim].permute(2, 0, 1).unsqueeze(0)
                for lvl in range(num_levels)
            ])
        return results


# Module-level singleton passed into the (monkey-patched) Qwen2.5-VL vision
# transformer so that per-layer features can be gathered during its forward pass.
GATHER = VisionFeaturesGather()


class VLMFO_1Template(Template):
    """Chat template for VLM-FO1 (built on Qwen2.5-VL).

    Preprocesses samples with two vision branches: a primary Qwen2-VL-style
    processor (flattened patches + ``image_grid_thw``) and an auxiliary
    CLIP-style processor (one CHW tensor per image) used for region features.
    Both processors are read from the loaded model's vision towers, which is
    why ``use_model`` is True.
    """
    image_token_id = 151655  # id of '<|image_pad|>' in the Qwen2.5-VL vocabulary
    video_token_id = 151656  # id of '<|video_pad|>'
    placeholder_tokens = ['<|image_pad|>']
    use_model = True  # _encode reaches into self.model for the vision-tower processors
    support_padding_free = True

    # Default config for the auxiliary (CLIP-style) image processor.
    # NOTE(review): currently unused at runtime — the auxiliary processor is
    # taken from model.vision_tower_aux in _encode; confirm before removing.
    img_cfg = {'do_resize': False, 'size': {'height': 768, 'width': 768}, 'resample': 3, 'do_center_crop': False, 'do_rescale': True, 'do_normalize': True, 'image_mean': [0.485, 0.456, 0.406], 'image_std': [0.229, 0.224, 0.225], 'do_convert_rgb': True, 'resize_mode': 'dynamic'}

    # NOTE(review): hard-coded local checkpoint path; appears unused at runtime
    # (processors come from the loaded model) — remove or make configurable.
    model_path = "/data2/wushengyu/model/VLM-FO1_Qwen2.5-VL-3B-v01"
    primary_min_pixels = 3136  # 56 x 56
    primary_max_pixels = 4194304  # 2048 x 2048

    def init_env_args(self):
        """Read template options from environment variables."""
        super().init_env_args()
        # 'legacy' wraps refs/bboxes in Qwen special tokens; anything else uses plain text.
        self.bbox_format = get_env_args('QWENVL_BBOX_FORMAT', str, 'legacy')

    def replace_tag(self, media_type: Literal['image', 'video', 'audio'], index: int,
                    inputs: StdTemplateInputs) -> List[Context]:
        """Replace an <image>/<video> tag with the Qwen2.5-VL vision placeholder.

        BUGFIX: the original file defined replace_tag twice; the second
        definition (returning [[-200], '\\n']) silently shadowed this one and
        was inconsistent with _encode, which expands '<|image_pad|>'
        (image_token_id 151655). The shadowing duplicate has been removed.
        """
        from qwen_vl_utils import fetch_image, fetch_video
        assert media_type in {'image', 'video'}
        if media_type == 'image':
            inputs.images[index] = fetch_image({'image': inputs.images[index]})
            if self.mode == 'lmdeploy':
                return ['<|vision_start|>', [-100], '<|vision_end|>']
            else:
                return ['<|vision_start|><|image_pad|><|vision_end|>']
        else:
            video = inputs.videos[index]
            video, video_kwargs = fetch_video({'video': video}, return_video_sample_fps=True)
            inputs.mm_processor_kwargs.setdefault('fps', []).append(video_kwargs)
            tokens = ['<|vision_start|><|video_pad|><|vision_end|>']
            if isinstance(video, torch.Tensor):
                video = video.to(torch.uint8)
            inputs.videos[index] = video
            return tokens

    def replace_bbox(self, bbox: List[int], index: int, inputs: StdTemplateInputs) -> List[Context]:
        """Render a bounding box either with Qwen box tokens or as plain text."""
        if self.bbox_format == 'legacy':
            return [f'<|box_start|>{self._get_bbox_str(bbox)}<|box_end|>']
        else:
            return [str(bbox)]

    @staticmethod
    def adjust_bbox(bbox_list, original_h, original_w, resize_h, resize_w):
        """
        Adjusts bounding boxes from original image size to resized image size, compensating for scaling.

        Args:
            bbox_list (List[List[float]]): List of original boxes [x1, y1, x2, y2].
            original_h (int): Original image height.
            original_w (int): Original image width.
            resize_h (int): Resized image height.
            resize_w (int): Resized image width.

        Returns:
            List[List[float]]: Bounding boxes transformed to resized image coordinates.
        """
        output_list = []

        def adjust_bbox_range(bbox, width, height):
            # Clamp all coordinates to the original image border.
            x1, y1, x2, y2 = bbox
            x1 = max(0, min(width, x1))
            y1 = max(0, min(height, y1))
            x2 = max(0, min(width, x2))
            y2 = max(0, min(height, y2))
            return [x1, y1, x2, y2]

        for bbox in bbox_list:
            bbox = adjust_bbox_range(bbox, original_w, original_h)
            bbox[0] = bbox[0] * resize_w / original_w
            bbox[1] = bbox[1] * resize_h / original_h
            bbox[2] = bbox[2] * resize_w / original_w
            bbox[3] = bbox[3] * resize_h / original_h
            output_list.append(bbox)
        return output_list

    def _encode(self, inputs: StdTemplateInputs) -> Dict[str, Any]:
        """Tokenize one sample and preprocess its images into model tensors.

        BUGFIX: the original body referenced three undefined names
        (``bbox_list``, ``media_inputs``, ``processor``) — their defining
        lines had been commented out, so any sample with images raised
        NameError. The missing pieces are reconstructed from the tensors the
        method already computes (primary grid thw, processor merge size);
        debug print() calls were removed.
        """
        encoded = super()._encode(inputs)
        input_ids = encoded['input_ids']
        labels = encoded['labels']
        loss_scale = encoded.get('loss_scale', None)

        images = inputs.images
        if images:
            media_token = self.image_token_id
            # PIL sizes are (width, height).
            origin_image_size = [image.size for image in images]

            # Both image processors live on the loaded model's vision towers;
            # unwrap one extra .model level when needed (e.g. PEFT wrapping).
            model = self.model.model
            if not hasattr(model, 'vision_tower'):
                model = model.model
            self.aux_image_processor = model.vision_tower_aux.image_processor
            self.primary_image_processor = model.vision_tower.image_processor

            # Auxiliary branch: one CHW tensor per image, e.g. [3, H, W].
            auxiliary_images_tensor = [
                self.aux_image_processor.preprocess(img, return_tensors='pt')['pixel_values'][0].to(
                    self.model_info.torch_dtype) for img in images
            ]

            # NOTE(review): region boxes are assumed to arrive via
            # inputs.objects['bbox'] (swift grounding convention) — the
            # original referenced an undefined `bbox_list`; confirm the
            # source against the dataset pipeline.
            objects = getattr(inputs, 'objects', None) or {}
            bbox_list = objects.get('bbox')
            if bbox_list:
                # Cap the number of regions for computational constraints.
                bbox_list = bbox_list[:100]
                resize_h, resize_w = auxiliary_images_tensor[0].shape[-2:]
                original_w, original_h = origin_image_size[0]
                # Rescale boxes to the post-preprocessing image size.
                bbox_list = self.adjust_bbox(bbox_list, original_h, original_w, resize_h, resize_w)
                bbox_list = [torch.tensor(bbox_list)]
            else:
                bbox_list = None

            # Primary branch: flattened patches [n_patches, patch_dim] plus a
            # [1, 3] grid (t, h, w) per image.
            primary_images = []
            primary_image_grid_thws = []
            for img in images:
                processed_data = self.primary_image_processor.preprocess(img, videos=None, return_tensors='pt')
                primary_images.append(processed_data['pixel_values'])
                primary_image_grid_thws.append(processed_data['image_grid_thw'])
            primary_images_tensor = [img_t.to(self.model_info.torch_dtype) for img_t in primary_images]

            media_grid_thw = torch.cat(primary_image_grid_thws, dim=0)
            merge_length = self.primary_image_processor.merge_size**2

            # Expand each single '<|image_pad|>' placeholder to the actual
            # number of visual tokens for that image.
            idx_list = findall(input_ids, media_token)

            def _get_new_tokens(i):
                token_len = (media_grid_thw[i].prod() // merge_length)
                return [media_token] * token_len

            input_ids, labels, loss_scale = self._extend_tokens(input_ids, labels, loss_scale, idx_list,
                                                                _get_new_tokens)
            # NOTE(review): key names must match the model forward signature —
            # verify 'aux_pixel_values' / 'bbox_list' against the FO1 model code.
            encoded.update({
                'pixel_values': torch.cat(primary_images_tensor, dim=0),
                'image_grid_thw': media_grid_thw,
            })
            encoded['aux_pixel_values'] = auxiliary_images_tensor
            if bbox_list is not None:
                encoded['bbox_list'] = bbox_list

        encoded['input_ids'] = input_ids
        encoded['labels'] = labels
        encoded['loss_scale'] = loss_scale
        return encoded

    def _post_encode(self, model, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """During training, pre-merge visual features into input embeddings."""
        if not self.is_training:
            return inputs
        input_ids = inputs['input_ids']
        base_model = self.get_base_model(model)
        # Embedding table location differs between plain and wrapped LMs.
        if hasattr(base_model.model, 'embed_tokens'):
            inputs_embeds = base_model.model.embed_tokens(input_ids)
        else:
            inputs_embeds = base_model.model.language_model.embed_tokens(input_ids)
        inputs_embeds = self._get_inputs_embeds_hf(inputs_embeds, inputs, model.visual, self.processor, model.config)
        return {'inputs_embeds': inputs_embeds}

# Register the VLM-FO1 template under the ChatML meta with the standard Qwen default system prompt.
register_template(ChatmlTemplateMeta(MLLMTemplateType.vlm_fo1, template_cls=VLMFO_1Template, default_system='You are a helpful assistant.'))