# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule

from mmaction.registry import MODELS
from .resnet3d_slowfast import ResNet3dPathway, build_pathway

@MODELS.register_module()
class ResNet3dSlowFastWithMotion(BaseModule):
    """SlowFast backbone augmented with a motion-vector stream.

    RGB frames are processed by the standard slow/fast pathways. An
    auxiliary motion-vector input (2 channels: x/y displacement) is
    embedded by a small 3D conv stack and fused into one or both
    pathways right after their stems (``conv1`` + ``maxpool``) via a
    1x1x1 fusion convolution.

    Args:
        pretrained (str, optional): Identifier of pretrained weights.
            Stored on the instance only; loading is handled elsewhere.
        resample_rate (int): Temporal subsampling factor applied to the
            input clip before the slow pathway. Defaults to 8.
        speed_ratio (int): Frame-rate ratio between the fast and slow
            pathways. Defaults to 8.
        channel_ratio (int): Channel-reduction ratio of the fast pathway
            relative to the slow pathway. Defaults to 8.
        motion_fusion_type (str): Where to fuse motion features. One of
            ``'early'`` (both pathways), ``'slow'`` or ``'fast'``.
            Defaults to ``'early'``.
        slow_pathway (dict): Config of the slow pathway.
        fast_pathway (dict): Config of the fast pathway.
        init_cfg (dict or list[dict], optional): Initialization config
            forwarded to :class:`BaseModule`.
    """

    def __init__(self,
                 pretrained: Optional[str] = None,
                 resample_rate: int = 8,
                 speed_ratio: int = 8,
                 channel_ratio: int = 8,
                 motion_fusion_type: str = 'early',
                 slow_pathway: Dict = dict(
                     type='resnet3d',
                     depth=50,
                     pretrained=None,
                     lateral=True,
                     conv1_kernel=(1, 7, 7),
                     conv1_stride_t=1,
                     pool1_stride_t=1,
                     inflate=(0, 0, 1, 1)),
                 fast_pathway: Dict = dict(
                     type='resnet3d',
                     depth=50,
                     pretrained=None,
                     lateral=False,
                     base_channels=8,
                     conv1_kernel=(5, 7, 7),
                     conv1_stride_t=1,
                     pool1_stride_t=1),
                 init_cfg: Optional[Union[Dict, List[Dict]]] = None) -> None:
        super().__init__(init_cfg=init_cfg)
        self.pretrained = pretrained
        self.resample_rate = resample_rate
        self.speed_ratio = speed_ratio
        self.channel_ratio = channel_ratio

        # Fail fast on unsupported fusion modes: forward() would
        # otherwise crash later with an AttributeError on motion_conv.
        if motion_fusion_type not in ('early', 'slow', 'fast'):
            raise ValueError(
                "motion_fusion_type must be 'early', 'slow' or 'fast', "
                f'got {motion_fusion_type!r}')

        # Copy before mutating: the defaults above are shared dict
        # objects, so writing into them directly would leak state
        # across instances (mutable-default-argument pitfall).
        slow_pathway = dict(slow_pathway)
        fast_pathway = dict(fast_pathway)
        if slow_pathway['lateral']:
            slow_pathway['speed_ratio'] = speed_ratio
            slow_pathway['channel_ratio'] = channel_ratio

        self.slow_path = build_pathway(slow_pathway)
        self.fast_path = build_pathway(fast_pathway)

        self.motion_fusion_type = motion_fusion_type
        # Ratio between RGB resolution and motion-vector grid
        # resolution (224 / 14 = 16); kept for reference, not used
        # directly below.
        self.motion_scale_factor = 16

        # Shared two-stage embedding for the 2-channel (dx, dy) motion
        # input; used for both the slow- and fast-resampled variants.
        self.motion_conv = nn.Sequential(
            self._make_motion_embed(2, 64),
            self._make_motion_embed(64, 64))

        # 1x1x1 fusion convs squeeze the concatenated RGB+motion
        # channels back down to each pathway's stem width.
        # NOTE(review): 64 and 8 mirror the stem widths of the default
        # pathway configs above — confirm if base_channels change.
        if motion_fusion_type in ('early', 'slow'):
            # slow stem (64) + motion (64) = 128 -> 64
            self.slow_fusion_conv = self._make_fusion_conv(128, 64)
        if motion_fusion_type in ('early', 'fast'):
            # fast stem (8) + motion (64) = 72 -> 8
            self.fast_fusion_conv = self._make_fusion_conv(72, 8)

    @staticmethod
    def _make_motion_embed(in_channels: int, out_channels: int) -> ConvModule:
        """Build one 1x3x3 conv-BN-ReLU stage of the motion embedding."""
        return ConvModule(
            in_channels,
            out_channels,
            kernel_size=(1, 3, 3),
            stride=(1, 1, 1),
            padding=(0, 1, 1),
            bias=False,
            conv_cfg=dict(type='Conv3d'),
            norm_cfg=dict(type='BN3d', requires_grad=True),
            act_cfg=dict(type='ReLU'))

    @staticmethod
    def _make_fusion_conv(in_channels: int, out_channels: int) -> ConvModule:
        """Build a 1x1x1 conv-BN-ReLU that fuses RGB+motion channels."""
        return ConvModule(
            in_channels,
            out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False,
            conv_cfg=dict(type='Conv3d'),
            norm_cfg=dict(type='BN3d'),
            act_cfg=dict(type='ReLU'))

    def process_motion_vectors(self, motion: torch.Tensor,
                               temporal_stride: str) -> torch.Tensor:
        """Convert raw motion vectors to the layout/size a pathway expects.

        Args:
            motion (torch.Tensor): Motion-vector tensor, either already
                channel-first ``(N, 2, T, H, W)`` or in the raw
                ``(N, 32, 14, 14, 2)`` layout — presumably 32 frames of
                a 14x14 displacement grid with (dx, dy) last.
            temporal_stride (str): ``'slow'`` to match the slow pathway
                (T resampled 32 -> 4); any other value targets the fast
                pathway (T kept at 32).

        Returns:
            torch.Tensor: Float tensor of shape
            ``(N, 2, target_T, 56, 56)``.
        """
        # BN/conv layers require float input.
        if motion.dtype != torch.float32:
            motion = motion.float()

        # Raw pipeline layout (N, 32, 14, 14, 2) -> channel-first.
        # NOTE(review): the T == 32 check hard-codes the clip length;
        # confirm against the data pipeline if clips ever change.
        if motion.ndim == 5 and motion.shape[1] == 32 and motion.shape[4] == 2:
            motion = motion.permute(0, 4, 1, 2, 3)

        # Targets match the pathway stems after conv1 + maxpool
        # (56x56 spatial; slow pathway runs at 1/8 frame rate).
        if temporal_stride == 'slow':
            target_frames = 4  # 32 / resample_rate(8)
        else:  # fast pathway keeps the full temporal resolution
            target_frames = 32
        target_spatial = 56

        _, _, t, h, w = motion.shape
        # Resize only when needed to avoid a redundant interpolation.
        if t != target_frames or h != target_spatial or w != target_spatial:
            motion = nn.functional.interpolate(
                motion,
                size=(target_frames, target_spatial, target_spatial),
                mode='trilinear',
                align_corners=False)

        return motion

    def forward(self, imgs=None, motion_vectors=None):
        """Run both pathways, optionally fusing motion features.

        Args:
            imgs (torch.Tensor): RGB clip of shape ``(N, C, T, H, W)``.
            motion_vectors (torch.Tensor, optional): Motion-vector
                input accepted by :meth:`process_motion_vectors`. When
                ``None``, this is a plain SlowFast forward pass.

        Returns:
            tuple[torch.Tensor]: ``(x_slow, x_fast)`` feature maps.
        """
        x = imgs

        # RGB stems: temporal subsampling, then conv1 + maxpool of
        # each pathway.
        x_slow = nn.functional.interpolate(
            x, mode='nearest', scale_factor=(1.0 / self.resample_rate, 1.0, 1.0))
        x_slow = self.slow_path.conv1(x_slow)
        x_slow = self.slow_path.maxpool(x_slow)

        x_fast = nn.functional.interpolate(
            x,
            mode='nearest',
            scale_factor=(1.0 / (self.resample_rate // self.speed_ratio), 1.0,
                          1.0))
        x_fast = self.fast_path.conv1(x_fast)
        x_fast = self.fast_path.maxpool(x_fast)

        if motion_vectors is not None:
            # Both variants always pass through motion_conv (even when
            # one is unused by the chosen fusion type) so that BN
            # running statistics evolve the same way during training.
            motion_slow = self.motion_conv(
                self.process_motion_vectors(motion_vectors, 'slow'))
            motion_fast = self.motion_conv(
                self.process_motion_vectors(motion_vectors, 'fast'))

            # Concatenate along channels and squeeze back to the stem
            # width with the matching 1x1x1 fusion conv.
            if self.motion_fusion_type in ('early', 'slow'):
                x_slow = self.slow_fusion_conv(
                    torch.cat([x_slow, motion_slow], dim=1))
            if self.motion_fusion_type in ('early', 'fast'):
                x_fast = self.fast_fusion_conv(
                    torch.cat([x_fast, motion_fast], dim=1))

        # Standard SlowFast trunk: lateral connection after the stem,
        # then the residual stages with laterals between all but the
        # last stage.
        if self.slow_path.lateral:
            x_fast_lateral = self.slow_path.conv1_lateral(x_fast)
            x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)

        for i, layer_name in enumerate(self.slow_path.res_layers):
            x_slow = getattr(self.slow_path, layer_name)(x_slow)
            x_fast = getattr(self.fast_path, layer_name)(x_fast)

            if (i != len(self.slow_path.res_layers) - 1
                    and self.slow_path.lateral):
                lateral_name = self.slow_path.lateral_connections[i]
                x_fast_lateral = getattr(self.slow_path, lateral_name)(x_fast)
                x_slow = torch.cat((x_slow, x_fast_lateral), dim=1)

        return (x_slow, x_fast)