import os
import re
import numpy as np
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmaction.registry import TRANSFORMS
from mmcv.transforms import BaseTransform
from typing import Dict

@TRANSFORMS.register_module()
class MotionVectors(BaseTransform):
    """Load pre-computed motion vectors from disk.

    Loads a ``.npy`` file that sits next to the source video (same path,
    ``.npy`` suffix) and selects the motion-vector frames that match
    ``frame_inds``.

    Required Keys:
        - filename
        - frame_inds

    Added Keys:
        - motion_vectors
    """

    def __init__(self,
                 **kwargs) -> None:
        """Store extra configuration options.

        Args:
            **kwargs: Unused extra options, kept for config compatibility.
        """
        self.kwargs = kwargs

    @staticmethod
    def _convert_video_path(original_path: str) -> Dict:
        """Convert a video path into the matching ``_mvs`` archive path.

        Handles train/test/val directories automatically and appends the
        ``_mvs`` suffix.

        Example input:  'data/video-3-150/train/1affe0e40f8a12f7/1054.mp4'
        Example output: {'filename': 'data/video-3-150/train_mvs/1affe0e40f8a12f7.npz',
                         'video_id': 1054}

        Args:
            original_path (str): Path of the original video file.

        Returns:
            dict: ``filename`` of the ``.npz`` archive and the integer
            ``video_id`` taken from the video file stem.

        Raises:
            ValueError: If no train/test/val path component is found.
        """
        # Normalize path separators so Windows-style paths are handled too.
        normalized_path = original_path.replace('\\', '/')

        # Split into directory part, containing folder name and video id.
        dir_part, file_part = os.path.split(normalized_path)
        folder_name = os.path.basename(dir_part)
        video_id = os.path.splitext(file_part)[0]

        # Replace the first whole train/test/val path component with
        # '<name>_mvs'.  The zero-width anchors ensure only a complete
        # component matches, so names such as 'pretrain' are not corrupted
        # (dir_part uses '/' only after normalization above).
        pattern = re.compile(r'(?:(?<=/)|^)(train|test|val)(?=/|$)')
        if pattern.search(dir_part) is None:
            raise ValueError("路径中未找到train/test/val目录")
        new_dir = pattern.sub(lambda m: f"{m.group(1)}_mvs", dir_part, count=1)
        new_filename = os.path.join(os.path.dirname(new_dir), f"{folder_name}.npz")

        return {
            'filename': new_filename.replace('\\', '/'),
            'video_id': int(video_id)
        }

    def transform(self, results: Dict) -> Dict:
        """Load motion vectors for the sampled frames.

        Args:
            results (dict): Result dict with ``filename`` and ``frame_inds``.

        Returns:
            dict: Result dict with ``motion_vectors`` added.
        """
        # The motion-vector file lives next to the video with a .npy suffix,
        # e.g. 'data/video-3-150/train/1affe0e40f8a12f7/1054.npy'.
        filename = results['filename']
        mvs_path = Path(filename).with_suffix('.npy')
        motion_vectors = np.load(mvs_path)

        # Prepend an all-zero frame: the first frame has no motion vector.
        zero_frame = np.zeros_like(motion_vectors[0:1])
        motion_vectors = np.concatenate((zero_frame, motion_vectors), axis=0)

        # Gather the motion vectors in frame_inds order.
        motion_vectors = motion_vectors[results['frame_inds']]

        results['motion_vectors'] = motion_vectors

        return results


@TRANSFORMS.register_module()
class GPUMotionVectors(BaseTransform):
    """Compute motion vectors on the GPU.

    Uses a block-matching algorithm (BMA) to compute motion vectors between
    consecutive frames.

    Required Keys:
        - inputs

    Added Keys:
        - motion_vectors
        - motion_confidence
    """

    def __init__(self,
                 block_size=16,
                 search_range=16,
                 device='cuda',
                 **kwargs) -> None:
        """Initialize the GPU motion-vector calculator.

        Args:
            block_size (int): Block size, default 16x16.
            search_range (int): Search range in pixels, default 16.
            device (str): Compute device, default 'cuda'.
            **kwargs: Unused extra options, kept for config compatibility.
        """
        self.block_size = block_size
        self.search_range = search_range
        self.device = device
        self.kwargs = kwargs

        # Block-matching backend; subclasses may swap in a different one.
        self.bma = BlockMatchingAlgorithm(
            block_size=block_size,
            search_range=search_range,
            device=device
        )

    def transform(self, results: Dict) -> Dict:
        """Compute motion vectors between consecutive frames.

        Args:
            results (dict): Result dict containing ``inputs``.

        Returns:
            dict: Result dict with ``motion_vectors`` and
            ``motion_confidence`` added.
        """
        # Fall back to CPU when CUDA is requested but unavailable.
        device = torch.device(self.device if torch.cuda.is_available() else 'cpu')
        device = self.device if self.device == 'cpu' else device

        imgs = results['inputs']

        # Make sure the input is a torch tensor.
        if not isinstance(imgs, torch.Tensor):
            imgs = torch.tensor(imgs, dtype=torch.float32)

        imgs = imgs.to(device)

        # Normalize to [0, 1] if the frames look like 0-255 data.
        if imgs.max() > 1.0:
            imgs = imgs / 255.0

        # NOTE(review): this unpacks FIVE dimensions while the loop below
        # iterates dim 0 as time and uses the last two as H/W -- assumes an
        # input layout of (T, ?, ?, H, W); confirm against the actual
        # 'inputs' tensor produced upstream.
        time_steps, channels, batch_size, height, width = imgs.shape

        # Motion vectors between each pair of consecutive frames.
        motion_vectors = []
        confidences = []

        for i in range(time_steps - 1):
            frame1 = imgs[i]  # current frame
            frame2 = imgs[i + 1]  # next frame

            try:
                mv, confidence = self.bma(frame1, frame2)

                motion_vectors.append(mv)
                confidences.append(confidence)
            except Exception as e:
                print(f"计算第 {i} 和 {i+1} 帧之间的运动矢量时出错: {e}")
                # Use zero motion vectors / confidences as placeholders.
                h_blocks = height // self.block_size
                w_blocks = width // self.block_size
                zero_mv = torch.zeros(2, h_blocks, w_blocks, device=device)
                zero_conf = torch.zeros(1, h_blocks, w_blocks, device=device)
                motion_vectors.append(zero_mv)
                confidences.append(zero_conf)

        if len(motion_vectors) == 0:
            # No frame pairs (e.g. a single-frame clip): emit empty tensors.
            h_blocks = height // self.block_size
            w_blocks = width // self.block_size
            motion_vectors = torch.zeros(0, 2, h_blocks, w_blocks, device=device)
            confidences = torch.zeros(0, 1, h_blocks, w_blocks, device=device)
        else:
            motion_vectors = torch.stack(motion_vectors, dim=0)
            confidences = torch.stack(confidences, dim=0)
        # Displacements fit in int8 because |mv| <= search_range (<= 127).
        motion_vectors = motion_vectors.round().to(torch.int8)

        results['motion_vectors'] = motion_vectors
        # Bug fix: 'motion_confidence' was documented and computed but never
        # stored in the results dict.
        results['motion_confidence'] = confidences

        return results


class BlockMatchingAlgorithm(nn.Module):
    """GPU motion-vector computation via exhaustive block matching (SAD)."""

    def __init__(self, block_size=16, search_range=16, device='cuda'):
        """Initialize the matcher.

        Args:
            block_size (int): Side length of the square matching blocks.
            search_range (int): Maximum displacement searched, in pixels.
            device (str): Device for intermediate tensors.
        """
        super().__init__()
        self.block_size = block_size
        self.search_range = search_range
        self.device = device

        # Precompute candidate offsets (kept for compatibility; the search
        # in forward() enumerates positions directly).
        self._precompute_search_offsets()

    def _precompute_search_offsets(self):
        """Precompute all (dy, dx) offsets inside the search range."""
        offsets = []
        for dy in range(-self.search_range, self.search_range + 1):
            for dx in range(-self.search_range, self.search_range + 1):
                offsets.append((dy, dx))

        self.offsets = torch.tensor(offsets, device=self.device, dtype=torch.float32)
        self.num_offsets = len(offsets)

    def forward(self, frame1, frame2):
        """Compute motion vectors between two frames.

        Args:
            frame1: Reference frame (C, H, W).
            frame2: Target frame (C, H, W).

        Returns:
            motion_vectors: (2, H//block_size, W//block_size); channel 0 is
                the x displacement and channel 1 the y displacement, measured
                from each block's top-left corner in ``frame1`` to its best
                match in ``frame2``.
            confidence: (1, H//block_size, W//block_size); reciprocal of the
                best SAD (higher means a better match).
        """
        # Add a batch dimension for (C, H, W) inputs.
        if frame1.dim() == 3:
            frame1 = frame1.unsqueeze(0)
            frame2 = frame2.unsqueeze(0)

        # Convert RGB to grayscale for speed (BT.601 luma weights).
        if frame1.shape[1] == 3:
            frame1_gray = 0.299 * frame1[:, 0] + 0.587 * frame1[:, 1] + 0.114 * frame1[:, 2]
            frame2_gray = 0.299 * frame2[:, 0] + 0.587 * frame2[:, 1] + 0.114 * frame2[:, 2]
        else:
            frame1_gray = frame1
            frame2_gray = frame2

        batch_size, height, width = frame1_gray.shape

        # Number of whole blocks along each axis.
        h_blocks = height // self.block_size
        w_blocks = width // self.block_size

        # (B, h_blocks, w_blocks, block_size, block_size)
        ref_blocks = self._extract_blocks(frame1_gray, self.block_size)

        motion_vectors = torch.zeros(batch_size, 2, h_blocks, w_blocks, device=self.device)
        confidence = torch.zeros(batch_size, 1, h_blocks, w_blocks, device=self.device)

        for i in range(h_blocks):
            for j in range(w_blocks):
                # Bug fix: index block (i, j) directly -- the previous
                # ``[:, :, i, j]`` sliced the wrong unfold dimensions and
                # produced a (B, h_blocks, block_size) tensor.
                ref_block = ref_blocks[:, i, j]  # (B, block_size, block_size)

                # Top-left corner of the block in frame1; displacements are
                # measured from here (the old center-based reference biased
                # every vector by block_size // 2).
                top_y = i * self.block_size
                top_x = j * self.block_size

                # Valid top-left positions for candidate blocks in frame2.
                min_y = max(0, top_y - self.search_range)
                max_y = min(height - self.block_size, top_y + self.search_range)
                min_x = max(0, top_x - self.search_range)
                max_x = min(width - self.block_size, top_x + self.search_range)

                best_sad = float('inf')
                best_dy, best_dx = 0, 0

                # Exhaustive SAD search over every candidate position.
                # (max_y/max_x are already clamped so dy + block_size <= height;
                # the old loop subtracted block_size again and shrank the search.)
                for dy in range(min_y, max_y + 1):
                    for dx in range(min_x, max_x + 1):
                        target_block = frame2_gray[:, dy:dy + self.block_size, dx:dx + self.block_size]

                        # SAD, summed over the batch dimension as well.
                        sad = torch.sum(torch.abs(ref_block - target_block))

                        if sad < best_sad:
                            best_sad = sad
                            best_dy = dy - top_y
                            best_dx = dx - top_x

                motion_vectors[:, 0, i, j] = best_dx
                motion_vectors[:, 1, i, j] = best_dy
                confidence[:, 0, i, j] = 1.0 / (best_sad + 1e-6)  # avoid divide-by-zero

        return motion_vectors.squeeze(0), confidence.squeeze(0)

    def _extract_blocks(self, img, block_size):
        """Extract all non-overlapping blocks from an image.

        Args:
            img: Input image (B, H, W).
            block_size: Block side length.

        Returns:
            blocks: Tensor of shape
            (B, H//block_size, W//block_size, block_size, block_size).
        """
        # unfold twice: rows then columns, both at stride == block_size.
        blocks = img.unfold(1, block_size, block_size).unfold(2, block_size, block_size)

        return blocks


class FastBlockMatching(nn.Module):
    """Faster block matching using vectorized candidate evaluation.

    Instead of a Python double-loop over every (dy, dx) offset, all candidate
    blocks inside the search window are extracted at once with stride-1
    ``unfold`` and their SADs computed in a single tensor expression.
    """

    def __init__(self, block_size=16, search_range=16, device='cuda'):
        """Initialize the matcher.

        Args:
            block_size (int): Side length of the square matching blocks.
            search_range (int): Maximum displacement searched, in pixels.
            device (str): Device for intermediate tensors.
        """
        super().__init__()
        self.block_size = block_size
        self.search_range = search_range
        self.device = device

        # Kept for backward compatibility; the vectorized search below no
        # longer needs an explicit SAD convolution kernel.
        self.sad_kernel = torch.ones(1, 1, block_size, block_size, device=device)

    def forward(self, frame1, frame2):
        """Compute motion vectors between two frames (vectorized SAD).

        Args:
            frame1: Reference frame (C, H, W).
            frame2: Target frame (C, H, W).

        Returns:
            motion_vectors: (2, H//block_size, W//block_size); channel 0 is
                the x displacement and channel 1 the y displacement, measured
                from each block's top-left corner in ``frame1``.
            confidence: (1, H//block_size, W//block_size); reciprocal of the
                best SAD.
        """
        # Add a batch dimension for (C, H, W) inputs.
        if frame1.dim() == 3:
            frame1 = frame1.unsqueeze(0)
            frame2 = frame2.unsqueeze(0)

        # Convert RGB to grayscale for speed (BT.601 luma weights).
        if frame1.shape[1] == 3:
            frame1_gray = 0.299 * frame1[:, 0] + 0.587 * frame1[:, 1] + 0.114 * frame1[:, 2]
            frame2_gray = 0.299 * frame2[:, 0] + 0.587 * frame2[:, 1] + 0.114 * frame2[:, 2]
        else:
            frame1_gray = frame1
            frame2_gray = frame2

        batch_size, height, width = frame1_gray.shape

        h_blocks = height // self.block_size
        w_blocks = width // self.block_size

        # (B, h_blocks, w_blocks, block_size, block_size)
        ref_blocks = self._extract_blocks(frame1_gray, self.block_size)

        motion_vectors = torch.zeros(batch_size, 2, h_blocks, w_blocks, device=self.device)
        confidence = torch.zeros(batch_size, 1, h_blocks, w_blocks, device=self.device)

        bs = self.block_size
        for i in range(h_blocks):
            for j in range(w_blocks):
                # Bug fix: index block (i, j) directly -- the previous
                # ``[:, :, i, j]`` sliced the wrong unfold dimensions.
                ref_block = ref_blocks[:, i, j]  # (B, bs, bs)

                # Top-left corner of the block in frame1.
                top_y = i * bs
                top_x = j * bs

                # Search window in frame2; candidate block top-left corners
                # range over [min_y, end_y - bs] x [min_x, end_x - bs].
                min_y = max(0, top_y - self.search_range)
                min_x = max(0, top_x - self.search_range)
                end_y = min(height, top_y + self.search_range + bs)
                end_x = min(width, top_x + self.search_range + bs)

                search_region = frame2_gray[:, min_y:end_y, min_x:end_x]

                # Skip degenerate windows that cannot hold a full block.
                if search_region.shape[1] < bs or search_region.shape[2] < bs:
                    continue

                # All candidate blocks at stride 1: (B, nY, nX, bs, bs).
                # Bug fix: the old code subtracted the block from the whole
                # window and ran one conv2d, which cannot evaluate SAD at
                # each offset (and broadcast-failed for bs != window size).
                candidates = search_region.unfold(1, bs, 1).unfold(2, bs, 1)

                # SAD per candidate, summed over batch and pixels: (nY, nX).
                sad_map = (candidates - ref_block[:, None, None]).abs().sum(dim=(0, 3, 4))

                # Locate the lowest-SAD candidate.
                flat_idx = int(torch.argmin(sad_map))
                best_y, best_x = divmod(flat_idx, sad_map.shape[1])
                min_val = sad_map.reshape(-1)[flat_idx]

                # Displacement from the block's top-left corner.
                motion_vectors[:, 0, i, j] = (min_x + best_x) - top_x
                motion_vectors[:, 1, i, j] = (min_y + best_y) - top_y
                confidence[:, 0, i, j] = 1.0 / (min_val + 1e-6)  # avoid divide-by-zero

        return motion_vectors.squeeze(0), confidence.squeeze(0)

    def _extract_blocks(self, img, block_size):
        """Extract all non-overlapping blocks from an image.

        Args:
            img: Input image (B, H, W).
            block_size: Block side length.

        Returns:
            blocks: Tensor of shape
            (B, H//block_size, W//block_size, block_size, block_size).
        """
        # unfold twice: rows then columns, both at stride == block_size.
        blocks = img.unfold(1, block_size, block_size).unfold(2, block_size, block_size)

        return blocks


# Choose the fastest implementation.
# In practice FastBlockMatching is usually faster than the basic
# BlockMatchingAlgorithm because it vectorizes the per-offset SAD evaluation.

# Variant of GPUMotionVectors that uses the faster implementation.
@TRANSFORMS.register_module()
class FastGPUMotionVectors(GPUMotionVectors):
    """GPU motion-vector transform backed by the faster block matcher.

    Behaves exactly like ``GPUMotionVectors`` but replaces the block-matching
    backend with ``FastBlockMatching``.
    """

    def __init__(self, block_size=16, search_range=16, device='cuda', **kwargs):
        super().__init__(block_size, search_range, device, **kwargs)
        # Swap the backend created by the parent for the vectorized matcher.
        self.bma = FastBlockMatching(block_size=block_size,
                                     search_range=search_range,
                                     device=device)