from ultralytics import YOLO
import torch
import torch.nn as nn
import torch.nn.functional as F

class TemporalAttention(nn.Module):
    """Spatial self-attention block with a learnable, zero-initialized residual gate.

    Because ``gamma`` starts at 0, the module is an identity mapping at
    initialization and gradually learns how much attention output to mix in.
    """

    def __init__(self, channels):
        super().__init__()
        # 1x1 convolutions produce the query/key/value projections.
        self.query = nn.Conv2d(channels, channels, kernel_size=1)
        self.key = nn.Conv2d(channels, channels, kernel_size=1)
        self.value = nn.Conv2d(channels, channels, kernel_size=1)
        # Residual mixing weight, starts at zero (identity behavior).
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        """Apply self-attention over spatial positions of ``x`` ([B, C, H, W])."""
        n, ch, height, width = x.shape
        spatial = height * width

        # Project and flatten the spatial dimensions: [B, C, H*W].
        q = self.query(x).view(n, ch, spatial)
        k = self.key(x).view(n, ch, spatial)
        v = self.value(x).view(n, ch, spatial)

        # Position-by-position affinity matrix, normalized per row: [B, H*W, H*W].
        scores = F.softmax(q.transpose(1, 2) @ k, dim=-1)

        # Aggregate values by the (transposed) affinities and restore [B, C, H, W].
        attended = (v @ scores.transpose(1, 2)).view(n, ch, height, width)

        # Gated residual connection.
        return x + self.gamma * attended

class TemporalFeatureModule(nn.Module):
    """Fuse temporal context from a frame clip with attention over the latest frame.

    Expects input shaped [batch, frames, channels, height, width] and returns
    a single fused feature map of shape [batch, channels, height, width].
    """

    def __init__(self, in_channels):
        super().__init__()
        # 3D convolution mixes information across neighboring frames.
        self.conv3d = nn.Conv3d(in_channels, in_channels, kernel_size=(3, 3, 3), padding=(1, 1, 1))
        # Spatial self-attention applied to the most recent frame only.
        self.attention = TemporalAttention(in_channels)
        # 1x1 conv collapses the concatenated features back to in_channels.
        self.fusion = nn.Conv2d(in_channels * 2, in_channels, kernel_size=1)

    def forward(self, x):
        # Conv3d wants [batch, channels, frames, h, w]; swap back afterwards.
        clip = x.transpose(1, 2)
        temporal = self.conv3d(clip).transpose(1, 2)

        # Attend over the newest frame in the clip.
        latest = x[:, -1]
        attended = self.attention(latest)

        # Concatenate along channels and fuse down to the original width.
        stacked = torch.cat([temporal[:, -1], attended], dim=1)
        return self.fusion(stacked)

class TemporalConsistencyLoss(nn.Module):
    """MSE between the frame-to-frame pose deltas of two pose sequences."""

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, pred_poses, prev_poses):
        """Penalize predicted motion that deviates from the reference motion.

        Both inputs are sequences indexed as [batch, time, ...]; the loss
        compares consecutive-frame differences rather than raw poses.
        """
        predicted_motion = pred_poses[:, 1:] - pred_poses[:, :-1]
        reference_motion = prev_poses[:, 1:] - prev_poses[:, :-1]
        return self.mse(predicted_motion, reference_motion)

class TemporalYOLO(nn.Module):
    """Wraps an ultralytics YOLO pose model with a temporal feature module.

    Per-frame backbone features are stacked across time, passed through
    `TemporalFeatureModule`, and fed to the original detection head.
    """

    def __init__(self, base_model='yolov8n-pose.pt'):
        super().__init__()
        self.base_model = YOLO(base_model)
        # NOTE(review): 256 channels is hard-coded here; it must match the
        # channel width of the features the backbone slice below emits — TODO confirm.
        self.temporal_module = TemporalFeatureModule(256)
        self.temporal_loss = TemporalConsistencyLoss()

        # Reuse the original model's detection head (last submodule).
        self.head = self.base_model.model.model[-1]

    def forward(self, x):
        """
        x: input tensor of shape [batch, frames, channels, height, width]
        """
        batch_size = x.size(0)
        num_frames = x.size(1)

        # Per-frame backbone features and per-frame predictions.
        all_features = []
        all_predictions = []

        # Run the backbone on each frame independently.
        for i in range(num_frames):
            # Extract base features for the current frame.
            current_frame = x[:, i]
            # NOTE(review): slicing the layer list and calling it runs the layers
            # strictly sequentially; YOLO architectures use skip connections
            # between non-adjacent layers, so this may not reproduce the real
            # backbone forward pass — verify against ultralytics internals.
            features = self.base_model.model.model[:-1](current_frame)
            all_features.append(features)

        # Stack per-frame features into a temporal sequence:
        # [batch, frames, channels, height, width].
        stacked_features = torch.stack(all_features, dim=1)

        # Fuse temporal context into a single feature map.
        temporal_features = self.temporal_module(stacked_features)

        # Final predictions from the original detection head.
        predictions = self.head(temporal_features)

        return predictions

    def train_step(self, batch, device):
        """Run one training step and return the combined loss."""
        images, targets = batch
        images = images.to(device)
        targets = targets.to(device)

        # Forward pass.
        predictions = self(images)

        # Base detection/pose loss.
        # NOTE(review): `criterion` is not a documented public attribute of the
        # ultralytics YOLO wrapper — confirm it exists on the installed version.
        base_loss = self.base_model.criterion(predictions, targets)

        # Temporal consistency loss.
        # NOTE(review): TemporalConsistencyLoss expects two [batch, time, ...]
        # pose sequences; passing raw head predictions and targets assumes their
        # shapes line up — verify before training.
        temp_loss = self.temporal_loss(predictions, targets)

        # Total loss (temporal term weighted by 0.1).
        total_loss = base_loss + 0.1 * temp_loss

        return total_loss

def create_temporal_yolo():
    """Build and return a temporally-enhanced YOLO pose model."""
    return TemporalYOLO()