# src/train.py

import copy
import os
import random
from collections import Counter

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from imblearn.over_sampling import RandomOverSampler
from scipy import fft
from sklearn.metrics import precision_recall_fscore_support, roc_auc_score
from torch.amp import autocast, GradScaler  # torch.amp instead of the deprecated torch.cuda.amp
from torch.utils.data import DataLoader, random_split
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

from data_loader import ECGDataset
from model_time import ChannelWiseCNN
from model_wavelet import (
    ECG_Autoencoder_Classifier_Lstm,
    ECG_Autoencoder_Classifier_Transformer,
    ECG_Autoencoder_Classifier_Mamba,
    ECG_Autoencoder_Classifier_Cnn,
)

def calculate_metrics(labels, predictions, probs):
    """Compute binary-classification metrics for one epoch.

    Args:
        labels: ground-truth 0/1 labels as a numpy array.
        predictions: hard 0/1 predictions aligned with ``labels``.
        probs: predicted positive-class probabilities (for AUC-ROC).

    Returns:
        Tuple ``(accuracy, precision, recall, f1, auc_roc)``.
    """
    accuracy = (labels == predictions).mean()
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, predictions, average='binary'
    )
    auc_roc = roc_auc_score(labels, probs)
    return accuracy, precision, recall, f1, auc_roc

def train_model(model, dataloaders, criterion_recon, criterion_cls, optimizer, scheduler, num_epochs, device, learning_rate):
    """Train a binary ECG classifier with AMP, TensorBoard logging and early stopping.

    Args:
        model: network producing classification logits; may return a tuple whose
            first element is the logits tensor.
        dataloaders: dict with 'train' and 'val' DataLoaders; each batch yields
            (inputs, labels, sample_id).
        criterion_recon: reconstruction loss. Currently unused (only the
            classification loss is optimized) but kept for interface compatibility.
        criterion_cls: classification loss on raw logits (e.g. BCEWithLogitsLoss).
        optimizer: optimizer over ``model.parameters()``.
        scheduler: LR scheduler stepped once per epoch after validation, or None.
        num_epochs: maximum number of epochs.
        device: torch device to run on.
        learning_rate: base LR; used only for the linear warmup phase.

    Returns:
        The model with the best-validation-AUC-ROC weights loaded.
    """
    writer = SummaryWriter('runs/ecg_training')

    # Directory for per-epoch checkpoints.
    checkpoint_dir = os.path.join(os.getcwd(), 'checkpoints')
    os.makedirs(checkpoint_dir, exist_ok=True)

    # FIX: model.state_dict() returns references to the live parameter tensors,
    # so without a deep copy the "best" weights silently track the current ones.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_auc_roc = 0.0      # early stopping tracks validation AUC-ROC
    patience = 30
    tolerance = 0.0005      # small band: AUC-ROC usually moves in small steps
    counter = 0

    # Linear learning-rate warmup over the first epochs.
    warmup_epochs = 10

    scaler = GradScaler("cuda") if torch.cuda.is_available() else GradScaler()

    for epoch in range(num_epochs):
        print(f'第 {epoch+1}/{num_epochs} 个 Epoch')
        print('-' * 10)

        # Warmup: ramp the LR linearly up to 0.1 * learning_rate.
        if epoch < warmup_epochs:
            lr_scale = min(1., float(epoch + 1) / warmup_epochs)
            for pg in optimizer.param_groups:
                pg['lr'] = lr_scale * learning_rate * 0.1

        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_recon_loss = 0.0
            running_cls_loss = 0.0

            all_labels = []
            all_preds = []
            all_probs = []

            for batch in dataloaders[phase]:
                inputs, labels, _ = batch  # third element is the sample id; unused here
                inputs = inputs.to(device)
                labels = labels.float().to(device)

                optimizer.zero_grad()

                with autocast(device_type="cuda" if torch.cuda.is_available() else "cpu"):
                    with torch.set_grad_enabled(phase == 'train'):
                        logits = model(inputs)
                        if isinstance(logits, tuple):
                            logits = logits[0]  # models may return (logits, ...)

                        # Match shapes so criterion_cls sees [B, 1] on both sides.
                        logits = logits.view(-1, 1)
                        labels = labels.view(-1, 1)

                        loss_cls = criterion_cls(logits, labels)
                        loss = loss_cls

                        probs = torch.sigmoid(logits)
                        # NOTE: decision threshold is deliberately 0.55, not 0.5.
                        preds = (probs >= 0.55).float()

                if phase == 'train':
                    scaler.scale(loss).backward()
                    # FIX: gradients must be unscaled before clipping; otherwise
                    # clip_grad_norm_ sees the AMP-scaled gradients and the
                    # effective clipping threshold is wrong (see torch.amp docs).
                    scaler.unscale_(optimizer)
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
                    scaler.step(optimizer)
                    scaler.update()

                # Collect flattened per-sample results for epoch metrics.
                all_labels.extend(labels.cpu().numpy().flatten())
                all_preds.extend(preds.detach().cpu().numpy().flatten())
                all_probs.extend(probs.detach().cpu().numpy().flatten())

                running_loss += loss.item() * inputs.size(0)
                running_cls_loss += loss_cls.item() * inputs.size(0)

            accuracy, precision, recall, f1, auc_roc = calculate_metrics(
                np.array(all_labels),
                np.array(all_preds),
                np.array(all_probs)
            )

            epoch_loss = running_loss / len(dataloaders[phase].dataset)

            # TensorBoard logging per phase.
            writer.add_scalar(f'{phase}/total_loss', epoch_loss, epoch)
            writer.add_scalar(f'{phase}/accuracy', accuracy, epoch)
            writer.add_scalar(f'{phase}/precision', precision, epoch)
            writer.add_scalar(f'{phase}/recall', recall, epoch)
            writer.add_scalar(f'{phase}/f1', f1, epoch)
            writer.add_scalar(f'{phase}/auc_roc', auc_roc, epoch)

            print(f'{phase} Loss: {epoch_loss:.4f}, Accuracy: {accuracy:.4f}, Precision: {precision:.4f}, '
                  f'Recall: {recall:.4f}, F1: {f1:.4f}, AUC-ROC: {auc_roc:.4f}')

            if phase == 'val' and scheduler is not None:
                # FIX: only ReduceLROnPlateau takes a metric; other schedulers
                # (e.g. CosineAnnealingWarmRestarts, as built in __main__)
                # interpret the argument as the epoch number, so passing the
                # validation loss corrupted their schedule.
                if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
                    scheduler.step(epoch_loss)
                else:
                    scheduler.step()

                # Early stopping on validation AUC-ROC with a tolerance band:
                # clear improvement -> save weights; within the band -> reset
                # the counter but keep the old best; otherwise count towards
                # patience.
                if auc_roc > best_auc_roc + tolerance:
                    best_auc_roc = auc_roc
                    best_model_wts = copy.deepcopy(model.state_dict())
                    counter = 0
                elif auc_roc > best_auc_roc - tolerance:
                    counter = 0
                else:
                    counter += 1

                if counter >= patience:
                    print(f'Early stopping at epoch {epoch+1}')
                    model.load_state_dict(best_model_wts)
                    writer.close()
                    return model

        # Save a checkpoint every epoch; loss/auc_roc come from the last
        # ('val') phase of this epoch.
        checkpoint_path = os.path.join(checkpoint_dir, f'epoch_{epoch+1}.pth')
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': epoch_loss,
            'auc_roc_score': auc_roc,
        }, checkpoint_path)
        print(f'Checkpoint saved: {checkpoint_path}')

        print()

    print(f'最佳验证AUC-ROC分数: {best_auc_roc:.4f}')

    writer.close()

    # Load the best weights before returning.
    model.load_state_dict(best_model_wts)
    return model

class ResampledECGDataset(Dataset):
    """Minimal Dataset over pre-resampled (signal, label) pairs.

    Stores references to the given sequences and serves them back unchanged;
    no copying or transformation is applied.
    """

    def __init__(self, signals, labels):
        self.signals = signals
        self.labels = labels

    def __len__(self):
        # One item per stored signal.
        return len(self.signals)

    def __getitem__(self, idx):
        signal = self.signals[idx]
        label = self.labels[idx]
        return signal, label

# 添加频域数据增强
class FrequencyDomainTransform:
    """Augmentation that adds Gaussian noise to the magnitude-spectrum channel.

    Per the original author's note, only channel ``x[1]`` (the magnitude
    spectrum) is perturbed; channel 0 is left untouched.
    NOTE(review): the input ``x`` is modified in place and also returned.
    """

    def __init__(self, noise_factor=0.01):
        # Scale applied to the standard-normal noise.
        self.noise_factor = noise_factor

    def __call__(self, x):
        noise = torch.randn_like(x[1]) * self.noise_factor
        x[1] = x[1] + noise
        return x

if __name__ == '__main__':
    # Processed data directories for the train/test splits.
    train_data_dir = 'data/processed/train_data'
    test_data_dir = 'data/processed/test_data'

    # Training hyperparameters.
    batch_size = 64
    num_epochs = 300
    learning_rate = 4e-4
    weight_decay = 1e-5

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Fix all RNG seeds for reproducibility.
    random.seed(3407)
    np.random.seed(3407)
    torch.manual_seed(3407)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(3407)
        torch.cuda.manual_seed_all(3407)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Datasets.
    train_dataset = ECGDataset(train_data_dir)
    val_dataset = ECGDataset(test_data_dir)

    # Sampling strategy: 'weighted', 'under' or 'none'.
    SAMPLING_STRATEGY = 'weighted'

    # Collect training labels once.
    # FIX: the original ran this exact loop twice (before and after building
    # the DataLoaders), doubling a full pass over the dataset.
    train_labels = []
    for idx in range(len(train_dataset)):
        _, label, _ = train_dataset[idx]  # third element is the sample id; unused
        train_labels.append(int(label.item()))

    num_negatives = train_labels.count(1)  # depressed samples are labelled 1
    num_positives = train_labels.count(0)  # normal samples are labelled 0
    print(f'原始数据 - 抑郁样本数: {num_negatives}, 正常样本数: {num_positives}')

    # Build the sampler for the chosen strategy.
    if SAMPLING_STRATEGY == 'weighted':
        # Inverse-frequency weighted resampling with replacement.
        class_counts = Counter(train_labels)
        class_weights = {cls: 1.0 / count for cls, count in class_counts.items()}
        sample_weights = [class_weights[label] for label in train_labels]
        sampler = WeightedRandomSampler(sample_weights, num_samples=len(sample_weights), replacement=True)
        shuffle = False
        print("使用加权采样策略")

    elif SAMPLING_STRATEGY == 'under':
        # Undersample the majority class down to the minority-class size.
        min_samples = min(num_positives, num_negatives)
        positive_indices = [i for i, label in enumerate(train_labels) if label == 0]
        negative_indices = [i for i, label in enumerate(train_labels) if label == 1]

        if num_positives > num_negatives:
            sampled_indices = random.sample(positive_indices, min_samples) + negative_indices
        else:
            sampled_indices = positive_indices + random.sample(negative_indices, min_samples)

        sampler = torch.utils.data.SubsetRandomSampler(sampled_indices)
        shuffle = False
        print("使用欠采样策略")

    else:
        # No resampling.
        sampler = None
        shuffle = True
        print("不使用采样策略")

    # FIX: build the DataLoaders exactly once. The original built this dict
    # twice; the first copy was dead code, and the surviving second copy
    # dropped the `shuffle` flag, so with SAMPLING_STRATEGY == 'none' the
    # training data was never shuffled.
    dataloaders = {
        'train': DataLoader(
            train_dataset,
            batch_size=batch_size,
            sampler=sampler,
            shuffle=shuffle if sampler is None else False,
            num_workers=0,
            pin_memory=torch.cuda.is_available(),
            worker_init_fn=lambda x: np.random.seed(3407 + x)
        ),
        'val': DataLoader(
            val_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=0,
            pin_memory=torch.cuda.is_available()
        )
    }

    # Positive-class weight (ratio of class counts), reported for reference.
    pos_weight_value = num_negatives / num_positives
    pos_weight = torch.tensor([pos_weight_value], dtype=torch.float32).to(device)
    print(f'正样本权重: {pos_weight.item():.4f}')

    # Model hyperparameters.
    input_dim = 1  # time-domain signal only
    hidden_dim = 256
    latent_dim = 128
    num_layers = 8

    model = ChannelWiseCNN(input_dim, hidden_dim, latent_dim, num_layers)

    def init_weights(m):
        """Kaiming init for Linear/Conv1d; orthogonal recurrent weights for LSTM."""
        if isinstance(m, (nn.Linear, nn.Conv1d)):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LSTM):
            for name, param in m.named_parameters():
                if 'weight_ih' in name:
                    nn.init.kaiming_normal_(param, mode='fan_out')
                elif 'weight_hh' in name:
                    nn.init.orthogonal_(param)
                elif 'bias' in name:
                    nn.init.constant_(param, 0)

    model.apply(init_weights)
    model = model.to(device)

    # Losses and optimizer.
    criterion_recon = nn.MSELoss(reduction='mean')
    # NOTE(review): pos_weight is computed above but a fixed 1.0 is used here,
    # presumably because the sampler already balances the classes — confirm
    # whether the computed pos_weight was meant to be passed instead.
    criterion_cls = nn.BCEWithLogitsLoss(
        pos_weight=torch.tensor([1.0]).to(device)
    )
    optimizer = optim.Adam(
        model.parameters(),
        lr=learning_rate,
        weight_decay=weight_decay
    )

    # Cosine-annealing schedule with warm restarts.
    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer,
        T_0=3,
        T_mult=1,
        eta_min=1e-6
    )

    model = train_model(
        model,
        dataloaders,
        criterion_recon,
        criterion_cls,
        optimizer,
        scheduler,
        num_epochs,
        device,
        learning_rate
    )

    # Persist the best model weights.
    os.makedirs('saved_models', exist_ok=True)
    torch.save(model.state_dict(), 'saved_models/best_model.pth')
