"""
基于Transformer的频谱分析模型
设计用于处理8192长度的频谱序列
优化以适应单张4090显存限制
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Dict, Tuple


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017) with dropout.

    A fixed sin/cos table is precomputed for up to ``max_len`` positions
    and added to the input embeddings on every forward pass.
    """

    def __init__(self, d_model: int, max_len: int = 10000, dropout: float = 0.1):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the table once: even columns get sine, odd columns get
        # cosine, with geometrically spaced frequencies across the dimension.
        positions = torch.arange(max_len).unsqueeze(1)
        freq_scale = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freq_scale)
        table[:, 1::2] = torch.cos(positions * freq_scale)
        # register_buffer so the table follows .to()/.cuda() moves but is
        # not treated as a trainable parameter.
        self.register_buffer('pe', table)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Add positional encodings to ``x``.

        Args:
            x: ``(batch_size, seq_len, d_model)`` input embeddings.

        Returns:
            Tensor of the same shape with positions added and dropout applied.
        """
        return self.dropout(x + self.pe[:x.size(1)])


class DownsamplingBlock(nn.Module):
    """Reduce sequence length with a strided 1-D convolution.

    A single Conv1d with stride ``downsample_factor`` shortens the temporal
    axis, followed by LayerNorm and GELU.
    """

    def __init__(self, in_channels: int, out_channels: int, downsample_factor: int = 2):
        super().__init__()
        # Kernel spans 2*factor - 1 samples so neighbouring windows overlap;
        # with padding factor // 2 the output length works out to
        # seq_len // factor for the factors used in this file (2 and 4).
        self.conv = nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size=downsample_factor * 2 - 1,
            stride=downsample_factor,
            padding=downsample_factor // 2,
        )
        self.norm = nn.LayerNorm(out_channels)
        self.activation = nn.GELU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Downsample along the sequence dimension.

        Args:
            x: ``(batch_size, seq_len, channels)``

        Returns:
            ``(batch_size, seq_len // downsample_factor, out_channels)``
        """
        # Conv1d wants (batch, channels, seq_len); transpose around the conv.
        reduced = self.conv(x.transpose(1, 2)).transpose(1, 2)
        return self.activation(self.norm(reduced))


class SpectrumTransformer(nn.Module):
    """Multi-task Transformer for spectrum analysis.

    Input:  spectrum of shape ``(batch_size, spectrum_len)``.
    Output: dict of predictions:
        * harmonic detection (binary logit)
        * harmonic-cluster count (class logits) and per-cluster parameters
        * modulation detection (binary logit)
        * modulation count (class logits) and per-modulation parameters
        * bearing-fault detection (auxiliary binary logit)

    The input is downsampled 16x (two stride-4 conv stages) before the
    Transformer encoder, which cuts attention cost enough to train on a
    single RTX 4090 (24 GB).
    """

    def __init__(self,
                 spectrum_len: int = 8192,
                 d_model: int = 256,
                 nhead: int = 8,
                 num_encoder_layers: int = 4,
                 dim_feedforward: int = 1024,
                 dropout: float = 0.1,
                 max_harmonic_clusters: int = 3,
                 max_modulations: int = 2):
        super().__init__()

        # The two stride-4 stages reduce the length by exactly 16x only when
        # spectrum_len is a multiple of 16; fail fast otherwise so the
        # positional-encoding table cannot end up shorter than the sequence.
        if spectrum_len % 16 != 0:
            raise ValueError(f"spectrum_len must be a multiple of 16, got {spectrum_len}")

        self.spectrum_len = spectrum_len
        self.d_model = d_model
        self.max_harmonic_clusters = max_harmonic_clusters
        self.max_modulations = max_modulations

        # Embed each scalar spectrum bin into d_model dimensions.
        self.input_embedding = nn.Linear(1, d_model)

        # Downsampling: spectrum_len -> /4 -> /16 (e.g. 8192 -> 2048 -> 512).
        self.downsample1 = DownsamplingBlock(d_model, d_model, downsample_factor=4)
        self.downsample2 = DownsamplingBlock(d_model, d_model, downsample_factor=4)
        self.reduced_len = spectrum_len // 16

        # Positional encoding sized exactly for the downsampled sequence.
        self.pos_encoder = PositionalEncoding(d_model, max_len=self.reduced_len, dropout=dropout)

        # Transformer encoder (batch_first so shapes stay (batch, seq, dim)).
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            activation='gelu',
            batch_first=True
        )
        self.transformer_encoder = nn.TransformerEncoder(
            encoder_layer,
            num_layers=num_encoder_layers
        )

        # Mean-pool over the sequence to get one global feature vector.
        self.global_pool = nn.AdaptiveAvgPool1d(1)

        # ===== task heads (all share the same 2-layer MLP shape) =====
        # 1. harmonic detection (binary logit)
        self.harmonic_classifier = self._make_head(d_model, 1, dropout)
        # 2. number of harmonic clusters: classes 0..max_harmonic_clusters
        self.num_clusters_predictor = self._make_head(d_model, max_harmonic_clusters + 1, dropout)
        # 3. per-cluster parameters: [base_freq, num_harmonics]
        self.cluster_params = nn.ModuleList(
            self._make_head(d_model, 2, dropout) for _ in range(max_harmonic_clusters)
        )
        # 4. modulation detection (binary logit)
        self.modulation_classifier = self._make_head(d_model, 1, dropout)
        # 5. number of modulations: classes 0..max_modulations
        self.num_modulations_predictor = self._make_head(d_model, max_modulations + 1, dropout)
        # 6. per-modulation parameters: [carrier_freq, modulation_freq, num_sidebands]
        self.modulation_params = nn.ModuleList(
            self._make_head(d_model, 3, dropout) for _ in range(max_modulations)
        )
        # 7. bearing-fault detection (optional auxiliary task, binary logit)
        self.bearing_classifier = self._make_head(d_model, 1, dropout)

    @staticmethod
    def _make_head(d_model: int, out_features: int, dropout: float) -> nn.Sequential:
        """Build one task head: Linear(d_model, 128) -> GELU -> Dropout -> Linear(128, out)."""
        return nn.Sequential(
            nn.Linear(d_model, 128),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(128, out_features)
        )

    def forward(self, spectrum: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Run the full model.

        Args:
            spectrum: ``(batch_size, spectrum_len)`` spectrum values.

        Returns:
            Dict with keys:
                ``has_harmonic``            (batch, 1) logit
                ``num_harmonic_clusters``   (batch, max_harmonic_clusters + 1) logits
                ``harmonic_cluster_params`` (batch, max_harmonic_clusters, 2)
                ``has_modulation``          (batch, 1) logit
                ``num_modulations``         (batch, max_modulations + 1) logits
                ``modulation_params``       (batch, max_modulations, 3)
                ``has_bearing_fault``       (batch, 1) logit
        """
        # Embed each bin: (batch, spectrum_len) -> (batch, spectrum_len, d_model)
        x = self.input_embedding(spectrum.unsqueeze(-1))

        # Downsample 16x: -> (batch, spectrum_len / 16, d_model)
        x = self.downsample2(self.downsample1(x))

        # Positional encoding + Transformer encoding.
        x = self.transformer_encoder(self.pos_encoder(x))

        # Global average pool over the sequence: (batch, d_model)
        x_pooled = self.global_pool(x.transpose(1, 2)).squeeze(-1)

        # ===== multi-task predictions =====
        predictions: Dict[str, torch.Tensor] = {
            'has_harmonic': self.harmonic_classifier(x_pooled),
            'num_harmonic_clusters': self.num_clusters_predictor(x_pooled),
            'harmonic_cluster_params': torch.stack(
                [head(x_pooled) for head in self.cluster_params], dim=1
            ),
            'has_modulation': self.modulation_classifier(x_pooled),
            'num_modulations': self.num_modulations_predictor(x_pooled),
            'modulation_params': torch.stack(
                [head(x_pooled) for head in self.modulation_params], dim=1
            ),
            'has_bearing_fault': self.bearing_classifier(x_pooled),
        }
        return predictions

    def count_parameters(self) -> int:
        """Return the number of trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)


class SpectrumTransformerLoss(nn.Module):
    """Multi-task loss for ``SpectrumTransformer``.

    Combines binary detection losses (BCE-with-logits), count losses
    (cross-entropy) and parameter-regression losses (MSE) with fixed
    per-task weights.  Parameter losses are only computed on samples
    whose corresponding detection target is positive.
    """

    def __init__(self,
                 weight_harmonic_cls: float = 1.0,
                 weight_num_clusters: float = 1.0,
                 weight_cluster_params: float = 2.0,
                 weight_modulation_cls: float = 1.0,
                 weight_num_modulations: float = 1.0,
                 weight_modulation_params: float = 2.0,
                 weight_bearing: float = 0.5,
                 max_freq: float = 2560.0):
        super().__init__()

        self.weight_harmonic_cls = weight_harmonic_cls
        self.weight_num_clusters = weight_num_clusters
        self.weight_cluster_params = weight_cluster_params
        self.weight_modulation_cls = weight_modulation_cls
        self.weight_num_modulations = weight_num_modulations
        self.weight_modulation_params = weight_modulation_params
        self.weight_bearing = weight_bearing
        # Upper bound of the frequency axis (Hz); predicted frequencies are
        # mapped into [0, max_freq] via sigmoid before regression.
        self.max_freq = max_freq

        self.bce_loss = nn.BCEWithLogitsLoss()
        self.ce_loss = nn.CrossEntropyLoss()
        self.mse_loss = nn.MSELoss()

    def forward(self,
                predictions: Dict[str, torch.Tensor],
                targets: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Dict[str, float]]:
        """Compute the weighted total loss.

        Args:
            predictions: model outputs (see ``SpectrumTransformer.forward``).
            targets: ground-truth labels with matching keys/shapes; count
                targets are class indices, detection targets are 0/1 floats.

        Returns:
            total_loss: scalar tensor (differentiable).
            losses: dict of each component's scalar value plus ``'total'``.
        """
        losses: Dict[str, float] = {}

        # 1. harmonic detection (binary)
        loss_harmonic_cls = self.bce_loss(
            predictions['has_harmonic'],
            targets['has_harmonic']
        )
        losses['harmonic_cls'] = loss_harmonic_cls.item()

        # 2. number of harmonic clusters (classification)
        loss_num_clusters = self.ce_loss(
            predictions['num_harmonic_clusters'],
            targets['num_harmonic_clusters']
        )
        losses['num_clusters'] = loss_num_clusters.item()

        # 3. cluster parameters — only for samples that contain harmonics
        if targets['has_harmonic'].sum() > 0:
            # squeeze(-1), not squeeze(): a plain squeeze() collapses the
            # batch dimension too when batch_size == 1, breaking the mask.
            harmonic_mask = targets['has_harmonic'].squeeze(-1) > 0.5

            pred_params = predictions['harmonic_cluster_params'][harmonic_mask]
            target_params = targets['harmonic_cluster_params'][harmonic_mask]

            # Map raw head outputs into physical ranges before regression:
            # base frequency -> [0, max_freq], harmonic count -> [0, 10].
            # Targets are assumed to already be expressed in these units.
            pred_params_scaled = torch.empty_like(pred_params)
            pred_params_scaled[..., 0] = torch.sigmoid(pred_params[..., 0]) * self.max_freq
            pred_params_scaled[..., 1] = torch.sigmoid(pred_params[..., 1]) * 10

            loss_cluster_params = self.mse_loss(pred_params_scaled, target_params)
        else:
            loss_cluster_params = torch.tensor(0.0, device=predictions['has_harmonic'].device)
        losses['cluster_params'] = loss_cluster_params.item()

        # 4. modulation detection (binary)
        loss_modulation_cls = self.bce_loss(
            predictions['has_modulation'],
            targets['has_modulation']
        )
        losses['modulation_cls'] = loss_modulation_cls.item()

        # 5. number of modulations (classification)
        loss_num_modulations = self.ce_loss(
            predictions['num_modulations'],
            targets['num_modulations']
        )
        losses['num_modulations'] = loss_num_modulations.item()

        # 6. modulation parameters — only for samples that contain modulation
        if targets['has_modulation'].sum() > 0:
            modulation_mask = targets['has_modulation'].squeeze(-1) > 0.5

            pred_mod_params = predictions['modulation_params'][modulation_mask]
            target_mod_params = targets['modulation_params'][modulation_mask]

            # carrier freq -> [0, max_freq], modulation freq -> [0, max_freq],
            # sideband count -> [0, 5]; targets already in these units.
            pred_mod_params_scaled = torch.empty_like(pred_mod_params)
            pred_mod_params_scaled[..., 0] = torch.sigmoid(pred_mod_params[..., 0]) * self.max_freq
            pred_mod_params_scaled[..., 1] = torch.sigmoid(pred_mod_params[..., 1]) * self.max_freq
            pred_mod_params_scaled[..., 2] = torch.sigmoid(pred_mod_params[..., 2]) * 5

            loss_modulation_params = self.mse_loss(pred_mod_params_scaled, target_mod_params)
        else:
            loss_modulation_params = torch.tensor(0.0, device=predictions['has_modulation'].device)
        losses['modulation_params'] = loss_modulation_params.item()

        # 7. bearing-fault detection (auxiliary, binary)
        loss_bearing = self.bce_loss(
            predictions['has_bearing_fault'],
            targets['has_bearing_fault']
        )
        losses['bearing'] = loss_bearing.item()

        # Weighted sum of all components.
        total_loss = (
            self.weight_harmonic_cls * loss_harmonic_cls +
            self.weight_num_clusters * loss_num_clusters +
            self.weight_cluster_params * loss_cluster_params +
            self.weight_modulation_cls * loss_modulation_cls +
            self.weight_num_modulations * loss_num_modulations +
            self.weight_modulation_params * loss_modulation_params +
            self.weight_bearing * loss_bearing
        )

        losses['total'] = total_loss.item()

        return total_loss, losses


if __name__ == "__main__":
    # 测试模型
    print("测试Transformer模型...")
    
    # 创建模型
    model = SpectrumTransformer(
        spectrum_len=8192,
        d_model=256,
        nhead=8,
        num_encoder_layers=4,
        dim_feedforward=1024,
        dropout=0.1
    )
    
    print(f"模型参数数量: {model.count_parameters():,}")
    
    # 测试前向传播
    batch_size = 4
    spectrum = torch.randn(batch_size, 8192)
    
    predictions = model(spectrum)
    
    print("\n预测输出形状:")
    for key, value in predictions.items():
        print(f"  {key}: {value.shape}")
    
    # 估算显存占用
    print(f"\n模型大小估算: {model.count_parameters() * 4 / 1024 / 1024:.2f} MB (FP32)")
    print(f"建议batch size: 32-64 (在4090 24GB上)")

