#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
基于时空特征融合的端到端微表情识别模型
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import math
from einops import rearrange, repeat


class SpatialAttention(nn.Module):
    """Spatial attention (CBAM-style): channel-pooled maps -> conv -> sigmoid mask."""

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()

        assert kernel_size in (3, 7), "内核大小必须为3或7"
        # For odd kernels 3 and 7, "same" padding is simply kernel_size // 2.
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Collapse the channel axis two ways (mean and max), keep a size-1 dim.
        pooled = torch.cat(
            [x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)],
            dim=1,
        )
        # A single conv turns the 2-channel summary into a 1-channel attention map.
        return self.sigmoid(self.conv(pooled))


class TemporalAttention(nn.Module):
    """Multi-head self-attention over the temporal axis of a feature sequence."""

    def __init__(self, hidden_dim, num_heads=8):
        super(TemporalAttention, self).__init__()

        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.head_dim = hidden_dim // num_heads
        assert self.head_dim * num_heads == hidden_dim, "hidden_dim必须能被num_heads整除"

        # Separate linear projections for queries, keys and values.
        self.query = nn.Linear(hidden_dim, hidden_dim)
        self.key = nn.Linear(hidden_dim, hidden_dim)
        self.value = nn.Linear(hidden_dim, hidden_dim)
        self.fc_out = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x):
        """Apply self-attention to x: [batch_size, seq_len, hidden_dim]."""
        b, t, _ = x.size()

        def split_heads(proj):
            # [b, t, hidden] -> [b, heads, t, head_dim]
            return proj.view(b, t, self.num_heads, self.head_dim).transpose(1, 2)

        q = split_heads(self.query(x))
        k = split_heads(self.key(x))
        v = split_heads(self.value(x))

        # Scaled dot-product attention per head.
        scores = (q @ k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        weights = scores.softmax(dim=-1)

        # Merge heads back: [b, heads, t, head_dim] -> [b, t, hidden].
        context = (weights @ v).transpose(1, 2).contiguous().view(b, t, self.hidden_dim)
        return self.fc_out(context)


class SpatialEncoder(nn.Module):
    """Spatial feature encoder: a stack of stride-2 conv stages or a pretrained ViT."""

    def __init__(self, config, input_dim=3, output_dim=256):
        super(SpatialEncoder, self).__init__()

        # Read settings from the config dict (defaults: 3-stage CNN).
        encoder_type = config.get('type', 'CNN')
        layers = config.get('layers', [64, 128, 256])

        self.encoder_type = encoder_type

        if encoder_type == 'CNN':
            # One Conv(stride 2) -> BN -> ReLU stage per entry in `layers`;
            # each stage halves the spatial resolution.
            widths = [input_dim] + list(layers)
            stages = [
                nn.Sequential(
                    nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(c_out),
                    nn.ReLU(inplace=True),
                )
                for c_in, c_out in zip(widths[:-1], widths[1:])
            ]
            self.encoder = nn.Sequential(*stages)
            self.output_dim = layers[-1]

        elif encoder_type == 'ViT':
            # Pretrained Vision Transformer with the classification head removed.
            self.encoder = models.vit_b_16(pretrained=True)
            self.encoder.heads = nn.Identity()
            self.output_dim = 768  # feature width of ViT-B/16

        else:
            raise ValueError(f"不支持的空间编码器类型: {encoder_type}")

    def forward(self, x):
        """Encode a batch of images x: [batch_size, channels, height, width]."""
        # Both encoder variants consume batched images directly.
        return self.encoder(x)


class TemporalEncoder(nn.Module):
    """Temporal feature encoder: LSTM, GRU, or Transformer over a frame sequence."""

    def __init__(self, config, input_dim=256, output_dim=256):
        super(TemporalEncoder, self).__init__()

        # Read settings from the config dict.
        encoder_type = config.get('type', 'LSTM')
        hidden_size = config.get('hidden_size', 256)
        num_layers = config.get('num_layers', 2)
        bidirectional = config.get('bidirectional', True)

        self.encoder_type = encoder_type

        if encoder_type in ('LSTM', 'GRU'):
            rnn_cls = nn.LSTM if encoder_type == 'LSTM' else nn.GRU
            self.encoder = rnn_cls(
                input_size=input_dim,
                hidden_size=hidden_size,
                num_layers=num_layers,
                batch_first=True,
                bidirectional=bidirectional,
            )
            # Bidirectional RNNs concatenate the forward and backward states.
            self.output_dim = hidden_size * (2 if bidirectional else 1)

        elif encoder_type == 'Transformer':
            self.encoder = nn.TransformerEncoder(
                nn.TransformerEncoderLayer(
                    d_model=input_dim,
                    nhead=8,
                    dim_feedforward=hidden_size,
                    batch_first=True,
                ),
                num_layers=num_layers,
            )
            self.output_dim = input_dim

        else:
            raise ValueError(f"不支持的时间编码器类型: {encoder_type}")

    def forward(self, x):
        """Encode x: [batch_size, seq_len, features] into a same-length sequence."""
        if self.encoder_type == 'Transformer':
            return self.encoder(x)
        # RNN variants also return the final hidden state, which is discarded.
        out, _ = self.encoder(x)
        return out


class OpticalFlowEncoder(nn.Module):
    """Optical-flow feature encoder: a stack of stride-2 conv stages over (dx, dy) maps."""

    def __init__(self, config, input_dim=2, output_dim=128):
        super(OpticalFlowEncoder, self).__init__()

        # Read settings from the config dict (defaults: 3-stage CNN).
        encoder_type = config.get('type', 'CNN')
        layers = config.get('layers', [32, 64, 128])

        self.encoder_type = encoder_type

        if encoder_type == 'CNN':
            # One Conv(stride 2) -> BN -> ReLU stage per entry in `layers`.
            widths = [input_dim] + list(layers)
            stages = [
                nn.Sequential(
                    nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(c_out),
                    nn.ReLU(inplace=True),
                )
                for c_in, c_out in zip(widths[:-1], widths[1:])
            ]
            self.encoder = nn.Sequential(*stages)
            self.output_dim = layers[-1]
        else:
            raise ValueError(f"不支持的光流编码器类型: {encoder_type}")

    def forward(self, x):
        """Encode flow fields x: [batch_size, channels, height, width]."""
        return self.encoder(x)


class FeatureFusion(nn.Module):
    """Fuses spatial, temporal and (optional) optical-flow feature vectors."""

    def __init__(self, spatial_dim, temporal_dim, flow_dim=None, fusion_method='concat'):
        super(FeatureFusion, self).__init__()

        self.fusion_method = fusion_method

        if fusion_method == 'concat':
            # Plain concatenation: output width is the sum of all inputs.
            self.output_dim = spatial_dim + temporal_dim + (flow_dim if flow_dim else 0)

        elif fusion_method == 'add':
            assert spatial_dim == temporal_dim and (flow_dim is None or flow_dim == spatial_dim), \
                "加法融合要求所有特征维度相同"
            self.output_dim = spatial_dim

        elif fusion_method == 'attention':
            # One scalar gate per modality, learned from that modality's features.
            self.spatial_attn = nn.Linear(spatial_dim, 1)
            self.temporal_attn = nn.Linear(temporal_dim, 1)
            concat_dim = spatial_dim + temporal_dim
            if flow_dim:
                self.flow_attn = nn.Linear(flow_dim, 1)
                concat_dim += flow_dim
            # Project the gated concatenation down to the narrower stream width.
            self.output_dim = min(spatial_dim, temporal_dim)
            self.fc_out = nn.Linear(concat_dim, self.output_dim)

        else:
            raise ValueError(f"不支持的融合方法: {fusion_method}")

    def forward(self, spatial_feat, temporal_feat, flow_feat=None):
        """Fuse the given feature vectors.

        Args:
            spatial_feat: Spatial features [batch_size, spatial_dim].
            temporal_feat: Temporal features [batch_size, temporal_dim].
            flow_feat: Optional optical-flow features [batch_size, flow_dim].

        Returns:
            The fused feature tensor.
        """
        feats = [spatial_feat, temporal_feat]
        if flow_feat is not None:
            feats.append(flow_feat)

        if self.fusion_method == 'concat':
            return torch.cat(feats, dim=1)

        if self.fusion_method == 'add':
            total = feats[0]
            for f in feats[1:]:
                total = total + f
            return total

        # Attention fusion: sigmoid gates, normalised to sum to 1 per sample,
        # then each modality is scaled by its gate before concatenation.
        gates = [
            torch.sigmoid(self.spatial_attn(spatial_feat)),
            torch.sigmoid(self.temporal_attn(temporal_feat)),
        ]
        if flow_feat is not None:
            gates.append(torch.sigmoid(self.flow_attn(flow_feat)))
        norm = sum(gates)
        weighted = torch.cat([f * (g / norm) for f, g in zip(feats, gates)], dim=1)
        return self.fc_out(weighted)


class SpatioTemporalFusion(nn.Module):
    """End-to-end micro-expression recognition via spatio-temporal feature fusion.

    Pipeline: per-frame backbone features (optionally re-weighted by spatial
    attention on the CNN feature maps), a temporal encoder over the frame
    sequence, an optional optical-flow branch, feature fusion, and an MLP
    classifier.
    """

    def __init__(self, backbone='resnet50', pretrained=True, feature_dim=512, num_classes=7,
                 dropout=0.5, use_attention=True, fusion_method='concat',
                 spatial_encoder=None, temporal_encoder=None, optical_flow_encoder=None,
                 num_frames=16, frame_size=(224, 224)):
        """
        Args:
            backbone: 'resnet50', 'efficientnet_b0' or 'vit_base'.
            pretrained: Load ImageNet weights for the backbone.
            feature_dim: Hidden width of the classifier MLP.
            num_classes: Number of expression classes.
            dropout: Dropout rate in the classifier.
            use_attention: Apply spatial attention on CNN feature maps.
            fusion_method: 'concat', 'add' or 'attention' (see FeatureFusion).
            spatial_encoder: Optional config dict for SpatialEncoder.
            temporal_encoder: Optional config dict for TemporalEncoder.
            optical_flow_encoder: Optional config dict for OpticalFlowEncoder;
                if falsy, the optical-flow branch is disabled.
            num_frames: Expected clip length (stored, not enforced).
            frame_size: Expected frame size (stored, not enforced).

        Raises:
            ValueError: If `backbone` is not one of the supported names.
        """
        super(SpatioTemporalFusion, self).__init__()

        self.backbone = backbone
        self.pretrained = pretrained
        self.feature_dim = feature_dim
        self.num_classes = num_classes
        self.dropout = dropout
        self.use_attention = use_attention
        self.fusion_method = fusion_method
        self.num_frames = num_frames
        self.frame_size = frame_size

        # Backbone network used to extract per-frame spatial features.
        if backbone == 'resnet50':
            self.backbone_net = models.resnet50(pretrained=pretrained)
            self.backbone_net.fc = nn.Identity()  # drop the final FC layer
            self.backbone_feat_dim = 2048
        elif backbone == 'efficientnet_b0':
            self.backbone_net = models.efficientnet_b0(pretrained=pretrained)
            self.backbone_net.classifier = nn.Identity()  # drop the classifier head
            self.backbone_feat_dim = 1280
        elif backbone == 'vit_base':
            self.backbone_net = models.vit_b_16(pretrained=pretrained)
            self.backbone_net.heads = nn.Identity()  # drop the classification head
            self.backbone_feat_dim = 768
        else:
            raise ValueError(f"不支持的骨干网络: {backbone}")

        # Spatial attention over the backbone's last conv feature maps.
        if use_attention:
            self.spatial_attention = SpatialAttention(kernel_size=7)

        # NOTE(review): this spatial encoder is constructed but never applied
        # in forward(); backbone features stand in for the "spatial" stream.
        # Its output_dim is still used as the target width of that stream.
        self.spatial_encoder = SpatialEncoder(
            spatial_encoder or {},
            input_dim=3,
            output_dim=256
        )
        self.spatial_feat_dim = self.spatial_encoder.output_dim

        # Temporal encoder over the sequence of per-frame backbone features.
        self.temporal_encoder = TemporalEncoder(
            temporal_encoder or {},
            input_dim=self.backbone_feat_dim,
            output_dim=256
        )
        self.temporal_feat_dim = self.temporal_encoder.output_dim

        # Optional optical-flow branch (flow has 2 channels: dx, dy).
        if optical_flow_encoder:
            self.optical_flow_encoder = OpticalFlowEncoder(
                optical_flow_encoder,
                input_dim=2,
                output_dim=128
            )
            self.flow_feat_dim = self.optical_flow_encoder.output_dim
        else:
            self.optical_flow_encoder = None
            self.flow_feat_dim = None

        # Fusion of the global spatial / temporal / flow feature vectors.
        self.feature_fusion = FeatureFusion(
            spatial_dim=self.spatial_feat_dim,
            temporal_dim=self.temporal_feat_dim,
            flow_dim=self.flow_feat_dim,
            fusion_method=fusion_method
        )
        self.fusion_output_dim = self.feature_fusion.output_dim

        # Classifier head.
        self.classifier = nn.Sequential(
            nn.Linear(self.fusion_output_dim, feature_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(feature_dim, num_classes)
        )

    def _extract_feature_maps(self, frames):
        """Run the CNN backbone up to (and including) its last conv stage.

        Args:
            frames: Batched frames [N, C, H, W].

        Returns:
            The final convolutional feature maps (before global pooling).
        """
        if self.backbone == 'resnet50':
            # Standard torchvision ResNet forward, stopping before avgpool/fc.
            net = self.backbone_net
            out = net.maxpool(net.relu(net.bn1(net.conv1(frames))))
            out = net.layer1(out)
            out = net.layer2(out)
            out = net.layer3(out)
            return net.layer4(out)
        # efficientnet_b0: `features` is the whole convolutional trunk.
        return self.backbone_net.features(frames)

    def forward(self, x):
        """Forward pass.

        Args:
            x: Input tensor [batch_size, channels, frames, height, width].
               If the caller attaches an ``optical_flow`` attribute of shape
               [batch_size, frames-1, 2, height, width] (e.g. on a tensor
               subclass) and a flow encoder is configured, the flow branch
               is used as well; otherwise it is skipped.

        Returns:
            logits: Classification output [batch_size, num_classes].
        """
        batch_size, C, T, H, W = x.shape

        # Flatten the temporal axis so each frame is processed independently:
        # [b, c, t, h, w] -> [(b*t), c, h, w].
        frames = x.permute(0, 2, 1, 3, 4).reshape(batch_size * T, C, H, W)

        if self.use_attention and self.backbone in ('resnet50', 'efficientnet_b0'):
            # BUGFIX: the original fed the *raw frames* straight into
            # resnet's layer4, which expects layer3 feature maps and would
            # crash at runtime (and also ran the full backbone twice).
            # Run the conv trunk once, apply spatial attention on the
            # resulting maps, then pool to per-frame vectors.
            feature_maps = self._extract_feature_maps(frames)
            attention_mask = self.spatial_attention(feature_maps)
            feature_maps = feature_maps * attention_mask
            frame_features = F.adaptive_avg_pool2d(feature_maps, (1, 1))
            frame_features = frame_features.view(batch_size * T, -1)
        else:
            # ViT backbone, or attention disabled: use the backbone directly.
            frame_features = self.backbone_net(frames)

        # Restore the sequence layout: [batch_size, T, backbone_feat_dim].
        sequence_features = frame_features.view(batch_size, T, -1)

        # Temporal encoding over the frame-feature sequence.
        temporal_features = self.temporal_encoder(sequence_features)

        # Optional optical-flow branch.
        flow_global = None
        if self.optical_flow_encoder is not None and hasattr(x, 'optical_flow'):
            optical_flow = x.optical_flow  # [b, T-1, 2, H, W] — TODO confirm with caller
            flow = optical_flow.reshape(-1, *optical_flow.shape[2:])
            # BUGFIX: the flow encoder returns conv feature *maps*; the
            # original treated them as flat vectors.  Pool to vectors first.
            flow_maps = self.optical_flow_encoder(flow)
            flow_vec = F.adaptive_avg_pool2d(flow_maps, (1, 1)).flatten(1)
            flow_seq = flow_vec.view(batch_size, -1, flow_vec.shape[-1])
            # Flow has T-1 steps; pad one zero step so lengths match T.
            zero_step = torch.zeros_like(flow_seq[:, :1, :])
            flow_seq = torch.cat([flow_seq, zero_step], dim=1)
            flow_global = flow_seq.mean(dim=1)  # [batch_size, flow_feat_dim]

        # Global (temporal-mean) pooling of the spatial and temporal streams.
        spatial_global = sequence_features.mean(dim=1)   # [b, backbone_feat_dim]
        temporal_global = temporal_features.mean(dim=1)  # [b, temporal_feat_dim]

        # Squash the backbone feature width down to spatial_feat_dim if needed.
        if spatial_global.shape[1] != self.spatial_feat_dim:
            # BUGFIX: the original unsqueezed dim 2, giving (N, D, 1), which
            # asks adaptive_avg_pool1d to *expand* the length-1 axis; the
            # following squeeze(2) was then a no-op and a 3-D tensor reached
            # the fusion layer.  Pool along the feature axis instead:
            # (N, 1, D) -> (N, 1, spatial_feat_dim) -> (N, spatial_feat_dim).
            spatial_global = F.adaptive_avg_pool1d(
                spatial_global.unsqueeze(1), self.spatial_feat_dim
            ).squeeze(1)

        # Fuse the three streams and classify.
        fused_features = self.feature_fusion(spatial_global, temporal_global, flow_global)
        logits = self.classifier(fused_features)

        return logits