# src/model.py

import sys
import os
# Add the project root directory to the Python path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import torch
import torch.nn as nn
import math
from src.config_loader import load_config

class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position information to a sequence of embeddings.

    Follows the scheme from "Attention Is All You Need": even embedding
    dimensions carry sine waves and odd dimensions carry cosines, with
    geometrically spaced wavelengths. Because the encoding is deterministic
    rather than learned, it can generalize to sequence lengths not seen
    during training.
    """
    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Angle matrix: rows index positions, columns index frequency bands.
        steps = torch.arange(max_len).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        angles = steps * freqs

        # Buffer shape (max_len, 1, d_model) so it broadcasts over the batch
        # axis of a (seq_len, batch, d_model) input.
        pe = torch.zeros(max_len, 1, d_model)
        pe[:, 0, 0::2] = torch.sin(angles)
        pe[:, 0, 1::2] = torch.cos(angles)
        self.register_buffer('pe', pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        seq_len = x.size(0)
        return self.dropout(x + self.pe[:seq_len])

class ChannelIndependentTransformer(nn.Module):
    """
    A Transformer model that processes each channel of a multivariate time series independently.
    This design choice is based on research showing that channel-independent (CI) models
    are more robust to distribution drift and often outperform channel-dependent models
    on real-world, non-stationary time series data.

    Supported task modes:
    - 'age': age regression only
    - 'gender': gender classification only
    - 'both': multi-task learning (age + gender)
    """
    def __init__(self, cfg=None, task_type=None):
        """
        Args:
            cfg: Configuration mapping (see config_loader). Loaded from disk
                when None.
            task_type: One of 'age', 'gender', 'both'. Falls back to
                cfg['task']['type'] (default 'both') when None.

        Raises:
            ValueError: If the resolved task type is not recognized.
        """
        super().__init__()

        # Load the configuration lazily so unit tests can inject a plain dict.
        if cfg is None:
            cfg = load_config()

        # Resolve the task mode: an explicit argument wins over the config value.
        if task_type is None:
            self.task_type = cfg.get('task', {}).get('type', 'both')
        else:
            self.task_type = task_type

        if self.task_type not in ('age', 'gender', 'both'):
            raise ValueError(f"Invalid task_type: {self.task_type}. Must be 'age', 'gender', or 'both'")

        self.num_features = len(cfg['data']['features'])
        self.d_model = cfg['model']['d_model']
        window_size = cfg['preprocessing']['window_size']
        dropout = cfg['model']['dropout']
        n_heads = cfg['model']['n_heads']
        dim_feedforward = cfg['model']['dim_feedforward']
        num_layers = cfg['model']['num_encoder_layers']

        # 1. Channel Embedding Layers
        # Each channel gets its own embedding layer to process its time series independently.
        self.channel_embeddings = nn.ModuleList([
            nn.Linear(window_size, self.d_model)
            for _ in range(self.num_features)
        ])

        # 2. Positional encoding over the channel sequence (one position per channel).
        self.channel_pos_encoder = PositionalEncoding(d_model=self.d_model, dropout=dropout, max_len=self.num_features)

        # 3. Core Transformer Encoder
        # We use PyTorch's built-in, optimized TransformerEncoder.
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=self.d_model,
            nhead=n_heads,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True  # Important: our data is (batch, seq, feature)
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # 4. Aggregation and Task-Specific Prediction Heads
        self.flatten = nn.Flatten()

        # Shared layer before the task-specific heads.
        self.shared_head = nn.Sequential(
            nn.Linear(self.num_features * self.d_model, self.d_model),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # Task-specific heads (created only for the configured task mode).
        if self.task_type in ('age', 'both'):
            self.age_head = nn.Linear(self.d_model, 1)  # Regression output for age

        if self.task_type in ('gender', 'both'):
            self.gender_head = nn.Linear(self.d_model, 1)  # Logit output for gender classification

    def forward(self, x: torch.Tensor):
        """
        Forward pass of the model.

        Args:
            x: Input tensor of shape (batch_size, window_size, num_features)

        Returns:
            Depends on task_type:
            - 'age': age_pred of shape (batch_size,)
            - 'gender': gender_logit of shape (batch_size,)
            - 'both': tuple (age_pred, gender_logit)

        Raises:
            ValueError: If x is not 3-D or its last dimension does not match
                the configured number of features.
        """
        # Validate the input shape explicitly: the original per-channel loop
        # would silently ignore extra channels or fail with an opaque
        # IndexError on too few.
        if x.dim() != 3 or x.size(-1) != self.num_features:
            raise ValueError(
                f"Expected input of shape (batch, window, {self.num_features}), "
                f"got {tuple(x.shape)}"
            )

        # 1. Embed each channel independently:
        #    (batch, window) -> (batch, d_model) per feature.
        channel_outputs = [
            self.channel_embeddings[i](x[:, :, i])
            for i in range(self.num_features)
        ]

        # 2. Stack embeddings into a channel sequence: (batch, num_features, d_model).
        aggregated_features = torch.stack(channel_outputs, dim=1)

        # 3. Add positional encoding for the channels. PositionalEncoding
        #    expects (seq, batch, d_model), so permute around the call.
        aggregated_features = self.channel_pos_encoder(
            aggregated_features.permute(1, 0, 2)
        ).permute(1, 0, 2)

        # 4. Let channels attend to each other: (batch, num_features, d_model).
        transformer_output = self.transformer_encoder(aggregated_features)

        # 5. Flatten and reduce through the shared trunk: (batch, d_model).
        shared_features = self.shared_head(self.flatten(transformer_output))

        # 6. Task-specific predictions, per the configured task mode.
        if self.task_type == 'age':
            return self.age_head(shared_features).squeeze(-1)  # (batch_size,)
        if self.task_type == 'gender':
            return self.gender_head(shared_features).squeeze(-1)  # (batch_size,)
        # 'both'
        age_pred = self.age_head(shared_features).squeeze(-1)  # (batch_size,)
        gender_logit = self.gender_head(shared_features).squeeze(-1)  # (batch_size,)
        return age_pred, gender_logit


# ==================== CNN-Transformer hybrid architecture ====================

class CNNFeatureExtractor(nn.Module):
    """1D-CNN feature extractor.

    Pulls local patterns out of an HRV time series through four
    convolutional stages; the first three each halve the temporal
    resolution, so the output sequence is 1/8 of the input length.
    """
    def __init__(self, input_channels=1, output_dim=128, dropout=0.1):
        """
        Args:
            input_channels: Number of input channels (1 for a univariate HRV series).
            output_dim: Channel count of the extracted feature map.
            dropout: Dropout probability applied after every stage.
        """
        super().__init__()

        def _stage(c_in, c_out, kernel, pool):
            # One stage: conv -> batch norm -> ReLU (-> optional 2x pool) -> dropout.
            # "same"-style padding keeps the length fixed before pooling.
            layers = [
                nn.Conv1d(c_in, c_out, kernel_size=kernel, stride=1, padding=kernel // 2),
                nn.BatchNorm1d(c_out),
                nn.ReLU(),
            ]
            if pool:
                layers.append(nn.MaxPool1d(kernel_size=2, stride=2))
            layers.append(nn.Dropout(dropout))
            return layers

        # Stages 1-3 downsample by 2 each; stage 4 only maps to output_dim.
        self.conv_blocks = nn.Sequential(
            *_stage(input_channels, 64, 7, True),
            *_stage(64, 128, 5, True),
            *_stage(128, 256, 3, True),
            *_stage(256, output_dim, 3, False),
        )

        self.output_dim = output_dim

    def forward(self, x):
        """
        Args:
            x: (batch_size, input_channels, sequence_length)

        Returns:
            (batch_size, output_dim, new_sequence_length)
        """
        return self.conv_blocks(x)


class HRVHybridTransformer(nn.Module):
    """
    CNN-Transformer hybrid model (multimodal variant).
    A per-channel 1D CNN extracts local features, then a Transformer encoder
    captures long-range dependencies across the concatenated feature tokens.
    Optionally fuses the time-series representation with tabular features.

    Supported task modes:
    - 'age': age regression only
    - 'gender': gender classification only
    - 'both': multi-task learning (age + gender)
    """
    def __init__(self, cfg=None, task_type=None, use_multimodal=True):
        """
        Args:
            cfg: Configuration mapping (see config_loader). Loaded from disk
                when None.
            task_type: One of 'age', 'gender', 'both'; falls back to
                cfg['task']['type'] (default 'both') when None.
            use_multimodal: When True, the model also consumes a tabular
                feature vector and fuses it with the time-series features;
                forward() then requires x_tabular.

        Raises:
            ValueError: If the resolved task type is not recognized.
        """
        super().__init__()

        # Load the configuration lazily so unit tests can inject a plain dict.
        if cfg is None:
            cfg = load_config()

        # Resolve the task mode: an explicit argument wins over the config value.
        if task_type is None:
            self.task_type = cfg.get('task', {}).get('type', 'both')
        else:
            self.task_type = task_type

        if self.task_type not in ('age', 'gender', 'both'):
            raise ValueError(f"Invalid task_type: {self.task_type}. Must be 'age', 'gender', or 'both'")

        self.use_multimodal = use_multimodal

        # Configuration parameters.
        self.num_features = len(cfg['data']['features'])
        self.d_model = cfg['model']['d_model']
        window_size = cfg['preprocessing']['window_size']
        dropout = cfg['model']['dropout']
        n_heads = cfg['model']['n_heads']
        dim_feedforward = cfg['model']['dim_feedforward']
        num_layers = cfg['model']['num_encoder_layers']

        # 1. One CNN extractor per input feature (channel-independent).
        self.cnn_extractors = nn.ModuleList([
            CNNFeatureExtractor(
                input_channels=1,
                output_dim=self.d_model,
                dropout=dropout
            )
            for _ in range(self.num_features)
        ])

        # The extractor halves the length three times (three MaxPool1d with
        # stride 2), so each channel yields window_size // 8 tokens.
        # NOTE: repeated floor-halving equals window_size // 8 even when
        # window_size is not divisible by 8, so this stays consistent.
        self.cnn_output_length = window_size // 8

        # 2. Positional encoding over the concatenated token sequence.
        self.pos_encoder = PositionalEncoding(
            d_model=self.d_model,
            dropout=dropout,
            max_len=self.num_features * self.cnn_output_length
        )

        # 3. Transformer Encoder.
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=self.d_model,
            nhead=n_heads,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # 4. Tabular-data processor.
        if self.use_multimodal:
            # Tabular dimension = HRV summary statistics + demographics:
            # per-feature statistics (mean, std, max, min, ...) plus gender (1).
            # NOTE(review): the 4-stats-per-feature default is an assumption
            # inherited from the config default — confirm against the caller.
            tabular_input_dim = cfg.get('multimodal', {}).get('tabular_dim', self.num_features * 4 + 1)

            self.tabular_processor = nn.Sequential(
                nn.Linear(tabular_input_dim, 256),
                nn.BatchNorm1d(256),
                nn.ReLU(),
                nn.Dropout(dropout),
                nn.Linear(256, 128),
                nn.BatchNorm1d(128),
                nn.ReLU(),
                nn.Dropout(dropout),
                nn.Linear(128, self.d_model),
                nn.ReLU(),
                nn.Dropout(dropout)
            )

        # 5. Fusion and prediction heads.
        total_seq_len = self.num_features * self.cnn_output_length

        # Feature dimension entering the shared trunk.
        if self.use_multimodal:
            fusion_dim = self.d_model * total_seq_len + self.d_model  # Transformer output + tabular features
        else:
            fusion_dim = self.d_model * total_seq_len  # Transformer output only

        # Shared layer before the task-specific heads.
        self.shared_head = nn.Sequential(
            nn.Linear(fusion_dim, self.d_model * 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(self.d_model * 2, self.d_model),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # Task-specific heads (created only for the configured task mode).
        if self.task_type in ('age', 'both'):
            self.age_head = nn.Linear(self.d_model, 1)

        if self.task_type in ('gender', 'both'):
            self.gender_head = nn.Linear(self.d_model, 1)

    def forward(self, x_ts, x_tabular=None):
        """
        Args:
            x_ts: Time-series input of shape (batch_size, window_size, num_features)
            x_tabular: Tabular features of shape (batch_size, tabular_dim);
                required when the model was built with use_multimodal=True.

        Returns:
            Depends on task_type:
            - 'age': age_pred (batch_size,)
            - 'gender': gender_logit (batch_size,)
            - 'both': (age_pred, gender_logit)

        Raises:
            ValueError: If x_ts has the wrong shape, or if use_multimodal is
                enabled and x_tabular is missing.
        """
        # Validate inputs up front. Previously, a missing x_tabular with
        # use_multimodal=True fell through to the un-fused features and
        # crashed inside shared_head with an opaque matmul shape error.
        if x_ts.dim() != 3 or x_ts.size(-1) != self.num_features:
            raise ValueError(
                f"Expected x_ts of shape (batch, window, {self.num_features}), "
                f"got {tuple(x_ts.shape)}"
            )
        if self.use_multimodal and x_tabular is None:
            raise ValueError(
                "Model was constructed with use_multimodal=True; x_tabular is required"
            )

        batch_size = x_ts.size(0)

        # 1. Run each feature channel through its own CNN extractor.
        cnn_outputs = []
        for i in range(self.num_features):
            feature_i = x_ts[:, :, i].unsqueeze(1)            # (batch, 1, window)
            cnn_out_i = self.cnn_extractors[i](feature_i)     # (batch, d_model, cnn_len)
            cnn_outputs.append(cnn_out_i.permute(0, 2, 1))    # (batch, cnn_len, d_model)

        # 2. Concatenate channel tokens: (batch, num_features * cnn_len, d_model).
        aggregated_features = torch.cat(cnn_outputs, dim=1)

        # 3. Positional encoding (module expects (seq, batch, d_model)).
        aggregated_features = self.pos_encoder(
            aggregated_features.permute(1, 0, 2)
        ).permute(1, 0, 2)

        # 4. Transformer encoding: (batch, seq, d_model).
        transformer_output = self.transformer_encoder(aggregated_features)

        # 5. Flatten the token dimension: (batch, seq * d_model).
        flat_ts_features = transformer_output.reshape(batch_size, -1)

        # 6. Fuse with tabular features when multimodal is enabled;
        #    a tabular input passed to a unimodal model is ignored, as before.
        if self.use_multimodal:
            tabular_features = self.tabular_processor(x_tabular)  # (batch, d_model)
            fused_features = torch.cat([flat_ts_features, tabular_features], dim=1)
        else:
            fused_features = flat_ts_features

        # 7. Shared trunk: (batch, d_model).
        shared_features = self.shared_head(fused_features)

        # 8. Task-specific predictions, per the configured task mode.
        if self.task_type == 'age':
            return self.age_head(shared_features).squeeze(-1)
        if self.task_type == 'gender':
            return self.gender_head(shared_features).squeeze(-1)
        # 'both'
        age_pred = self.age_head(shared_features).squeeze(-1)
        gender_logit = self.gender_head(shared_features).squeeze(-1)
        return age_pred, gender_logit


