# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Sequence, Tuple, Union

import torch
from pytorch_wavelets import DWTForward, DWTInverse
from mmengine.model import BaseDataPreprocessor, stack_batch

from mmaction.registry import MODELS
from mmaction.utils import SampleList


@MODELS.register_module()
class ActionVenusDataPreprocessor(BaseDataPreprocessor):
    """Data pre-processor for action recognition with wavelet denoising.

    Video frames are passed through a multi-level 2-D DWT round trip
    (soft-thresholded detail bands, slightly amplified approximation band)
    before the standard normalization pipeline; motion vectors are
    normalized with precomputed per-channel statistics.

    Args:
        mean (Sequence[float or int], optional): The pixel mean of channels
            of images or stacked optical flow. Defaults to None.
        std (Sequence[float or int], optional): The pixel standard deviation
            of channels of images or stacked optical flow. Defaults to None.
        to_rgb (bool): Whether to convert image from BGR to RGB.
            Defaults to False.
        to_float32 (bool): Whether to convert data to float32.
            Defaults to True.
        wavelet (str): Wavelet family passed to ``pytorch_wavelets``.
            Defaults to ``'db4'``.
        levels (int): Number of DWT decomposition levels. Defaults to 3.
        blending (dict, optional): Config for batch blending.
            Defaults to None.
        format_shape (str): Format shape of input data.
            Defaults to ``'NCHW'``.
    """

    def __init__(self,
                 mean: Optional[Sequence[Union[float, int]]] = None,
                 std: Optional[Sequence[Union[float, int]]] = None,
                 to_rgb: bool = False,
                 to_float32: bool = True,
                 wavelet: str = 'db4',
                 levels: int = 3,
                 blending: Optional[dict] = None,
                 format_shape: str = 'NCHW') -> None:
        super().__init__()
        self.to_rgb = to_rgb
        self.to_float32 = to_float32
        self.format_shape = format_shape

        # Precomputed per-channel statistics of the motion vectors; see
        # ``compute_motion_statistics`` for the offline helper that
        # produced them.
        self.motion_mean = torch.tensor([-0.1812, -0.1015])
        self.motion_std = torch.tensor([2.8018, 3.2131])

        self.wavelet = wavelet
        self.levels = levels
        # Forward and inverse wavelet transforms.  Both are nn.Modules
        # assigned as submodules, so they follow the preprocessor when it
        # is moved to another device.
        self.dwt = DWTForward(
            J=self.levels, wave=self.wavelet, mode='zero').to(self.device)
        self.idwt = DWTInverse(wave=self.wavelet, mode='zero').to(self.device)

        if mean is not None:
            assert std is not None, 'To enable the normalization in ' \
                                    'preprocessing, please specify both ' \
                                    '`mean` and `std`.'
            # Enable the normalization in preprocessing.
            self._enable_normalize = True
            if self.format_shape == 'NCHW':
                normalizer_shape = (-1, 1, 1)
            elif self.format_shape in ['NCTHW', 'MIX2d3d']:
                normalizer_shape = (-1, 1, 1, 1)
            else:
                raise ValueError(f'Invalid format shape: {format_shape}')

            self.register_buffer(
                'mean',
                torch.tensor(mean, dtype=torch.float32).view(normalizer_shape),
                False)
            self.register_buffer(
                'std',
                torch.tensor(std, dtype=torch.float32).view(normalizer_shape),
                False)
        else:
            self._enable_normalize = False

        if blending is not None:
            self.blending = MODELS.build(blending)
        else:
            self.blending = None

    def _filter_coefficients(self, coeffs):
        """Filter wavelet coefficients in place.

        The approximation (low-frequency) band is mildly amplified and
        every detail (high-frequency) band is soft-thresholded.

        Args:
            coeffs (tuple): ``(LL, bands)`` as produced by ``DWTForward``,
                where ``bands`` is one stack of detail coefficients per
                decomposition level.

        Returns:
            tuple: ``(LL, bands)`` with the filtering applied.
        """
        low_band, detail_bands = coeffs

        # Mildly boost the low-frequency content.
        low_band = low_band * 1.1

        # Soft threshold for the detail coefficients.
        threshold = 0.05
        for level in range(len(detail_bands)):
            band = detail_bands[level]
            # Skip anything that is not a stack of tensors.
            if not all(isinstance(coeff, torch.Tensor) for coeff in band):
                continue

            # Vectorized soft thresholding over the whole level:
            # sign(x) * max(|x| - t, 0).
            stacked = torch.stack(tuple(band), dim=0)
            filtered = torch.sign(stacked) * torch.relu(
                torch.abs(stacked) - threshold)

            # Write the filtered slices back into the original container.
            for j in range(len(band)):
                band[j] = filtered[j]

        return (low_band, detail_bands)

    def _filter_input(self, input):
        """Denoise one clip tensor with a wavelet transform round trip.

        Args:
            input (torch.Tensor): 5-D frame tensor.  uint8 inputs are
                interpreted as [0, 255] pixel values and scaled to [0, 1]
                before the transform.
                NOTE(review): the dims are unpacked below as
                ``(time_steps, channels, batch_size, H, W)`` while the
                trailing comment in the original code claimed a
                ``(batch, time, channels, ...)`` layout — confirm the
                actual layout produced by the pipeline.

        Returns:
            torch.Tensor: uint8 tensor with the same shape as the input.
        """
        # Work in float [0, 1].
        # NOTE(review): a float input that is already in [0, 255] would be
        # destroyed by the clamp below — confirm callers always pass uint8.
        if input.dtype == torch.uint8:
            input = input.float() / 255.0

        time_steps, channels, batch_size, height, width = input.shape

        # Collapse the leading dims so the 2-D DWT sees (N, C, H, W).
        input = input.reshape(-1, channels, height, width)

        # Forward transform -> coefficient filtering -> reconstruction.
        coeffs = self.dwt(input)
        coeffs = self._filter_coefficients(coeffs)
        input = self.idwt(coeffs)

        # Clamp reconstruction artefacts back into the valid range.
        input = torch.clamp(input, 0, 1)

        # Restore the original 5-D layout and the uint8 [0, 255] encoding.
        input = input.reshape(time_steps, channels, batch_size, height, width)
        input = input * 255.0
        return input.to(torch.uint8)

    def forward(self,
                data: Union[dict, Tuple[dict]],
                training: bool = False) -> Union[dict, Tuple[dict]]:
        """Perform normalization, padding, bgr2rgb conversion and batch
        augmentation based on ``BaseDataPreprocessor``.

        Args:
            data (dict or Tuple[dict]): data sampled from dataloader.
            training (bool): Whether to enable training time augmentation.

        Returns:
            dict or Tuple[dict]: Data in the same format as the model input.
        """
        data = self.cast_data(data)
        if isinstance(data, dict):
            return self.forward_onesample(data, training=training)
        elif isinstance(data, (tuple, list)):
            outputs = []
            for data_sample in data:
                output = self.forward_onesample(data_sample, training=training)
                outputs.append(output)
            return tuple(outputs)
        else:
            raise TypeError(f'Unsupported data type: {type(data)}!')

    def forward_onesample(self, data, training: bool = False) -> dict:
        """Perform normalization, padding, bgr2rgb conversion and batch
        augmentation on one data sample.

        Args:
            data (dict): data sampled from dataloader.
            training (bool): Whether to enable training time augmentation.

        Returns:
            dict: Data in the same format as the model input.
        """
        # ------ imgs: wavelet filter + standard preprocessing ------
        inputs, data_samples = data['inputs']['imgs'], data['data_samples']
        # Rebind the list: the previous ``for x in inputs: x = filter(x)``
        # pattern only reassigned the loop variable and discarded every
        # filtered tensor.
        inputs = [self._filter_input(inp) for inp in inputs]
        inputs, data_samples = self.preprocess(inputs, data_samples, training)
        data['inputs']['imgs'] = inputs
        # --------- motion_vectors (optical flow) ---------
        motions = data['inputs']['motion_vectors']
        batch_motions = torch.stack(motions)
        batch_motions = self.process_motion_vectors(
            batch_motions, data_samples, training)
        data['inputs']['motion_vectors'] = batch_motions
        # ------------------ labels -------------------
        data['data_samples'] = data_samples
        return data

    def compute_motion_statistics(self, all_motions):
        """Compute the per-channel mean/std of motion vectors.

        Offline helper used to derive the constants stored in
        ``self.motion_mean`` / ``self.motion_std``; it is not called in the
        regular forward path.

        Args:
            all_motions (Sequence[torch.Tensor]): motion-vector tensors
                whose last dimension holds the 2 flow channels.
        """
        # Concatenate every sample and force float32 for stable statistics.
        all_motions = torch.cat(all_motions, dim=0)
        all_motions = all_motions.type(torch.float32)

        # Flatten all leading dims so each row is one 2-channel vector,
        # e.g. (N, 31, 14, 14, 2) -> (N*31*14*14, 2).
        motions_flat = all_motions.reshape(-1, 2)

        self.motion_mean = motions_flat.mean(dim=0)  # shape: (2,)
        self.motion_std = motions_flat.std(dim=0)    # shape: (2,)

    def process_motion_vectors(self, batch_motion_vectors, data_samples=None,
                               training=False):
        """Normalize a batch of motion vectors via broadcasting.

        Args:
            batch_motion_vectors (torch.Tensor): stacked motion vectors
                whose last dimension holds the 2 flow channels.
            data_samples: unused; kept for signature parity with
                ``preprocess``.
            training (bool): unused; kept for signature parity.

        Returns:
            torch.Tensor: float32 tensor, normalized when normalization is
            enabled on this preprocessor.
        """
        # Make sure the dtype is float32 before any arithmetic.
        if batch_motion_vectors.dtype != torch.float32:
            batch_motion_vectors = batch_motion_vectors.to(torch.float32)

        # --- normalization ---
        if self._enable_normalize:
            # motion_mean/std are plain attributes (not buffers), so move
            # them to the batch's device explicitly.
            mean = self.motion_mean.to(batch_motion_vectors.device)
            std = self.motion_std.to(batch_motion_vectors.device)

            # Reshape (2,) -> (1, 1, 1, 1, 2) so the statistics broadcast
            # against e.g. (N, 31, 14, 14, 2) without copying.
            if len(mean.shape) == 1:
                mean = mean.view(1, 1, 1, 1, -1)
                std = std.view(1, 1, 1, 1, -1)

            batch_motion_vectors = (batch_motion_vectors - mean) / std

        return batch_motion_vectors

    def preprocess(self,
                   inputs: List[torch.Tensor],
                   data_samples: SampleList,
                   training: bool = False) -> Tuple:
        """Pad/stack, color-convert, normalize and optionally blend images.

        Args:
            inputs (List[torch.Tensor]): per-sample image tensors.
            data_samples (SampleList): annotation samples for the batch.
            training (bool): Whether to enable training time augmentation.

        Returns:
            Tuple: ``(batch_inputs, data_samples)``.
        """
        # --- Pad and stack --
        batch_inputs = stack_batch(inputs)

        # MIX2d3d picks the layout per batch from the tensor rank.
        if self.format_shape == 'MIX2d3d':
            if batch_inputs.ndim == 4:
                format_shape, view_shape = 'NCHW', (-1, 1, 1)
            else:
                format_shape, view_shape = 'NCTHW', None
        else:
            format_shape, view_shape = self.format_shape, None

        # ------ To RGB ------
        if self.to_rgb:
            if format_shape == 'NCHW':
                batch_inputs = batch_inputs[..., [2, 1, 0], :, :]
            elif format_shape == 'NCTHW':
                batch_inputs = batch_inputs[..., [2, 1, 0], :, :, :]
            else:
                raise ValueError(f'Invalid format shape: {format_shape}')

        # -- Normalization ---
        if self._enable_normalize:
            if view_shape is None:
                batch_inputs = (batch_inputs - self.mean) / self.std
            else:
                # Reshape the registered buffers for the 2-D layout.
                mean = self.mean.view(view_shape)
                std = self.std.view(view_shape)
                batch_inputs = (batch_inputs - mean) / std
        elif self.to_float32:
            batch_inputs = batch_inputs.to(torch.float32)

        # ----- Blending -----
        if training and self.blending is not None:
            batch_inputs, data_samples = self.blending(batch_inputs,
                                                       data_samples)

        return batch_inputs, data_samples
