import shutil
from os import path

import numpy as np
import torch
from data_preprocessing import FundusDataset, FundusPreprocessor
from global_variables import GlobalVariables
from logger import logger
from model import DualFundusClassifier
from sklearn.metrics import (
    average_precision_score,
    precision_score,
    recall_score,
)
from sklearn.model_selection import train_test_split
from torch.nn import BCELoss
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader


def calculate_metrics(all_labels, all_preds, threshold=0.8):
    """Compute macro-averaged multi-label classification metrics.

    Args:
        all_labels: iterable of binary ground-truth label vectors.
        all_preds: iterable of per-label prediction scores (model outputs
            after Sigmoid, assumed in [0, 1]).
        threshold: score cutoff used to derive hard predictions for
            precision/recall. Defaults to the original hard-coded 0.8.

    Returns:
        dict with keys "mAP", "precision", "recall" (all macro-averaged,
        i.e. every label contributes equally).
    """
    all_labels = np.asarray(all_labels)
    scores = np.asarray(all_preds)
    # Hard predictions are only needed for precision/recall.
    hard_preds = scores > threshold

    # Bug fix: average precision must be computed from the continuous
    # scores — thresholding first destroys the ranking information and
    # makes the AP value degenerate.
    map_score = average_precision_score(all_labels, scores, average="macro")

    # zero_division=0 avoids warnings/NaNs for labels with no predicted
    # (or no true) positives.
    precision = precision_score(all_labels, hard_preds, average="macro", zero_division=0)
    recall = recall_score(all_labels, hard_preds, average="macro", zero_division=0)

    return {
        "mAP": map_score,
        "precision": precision,
        "recall": recall,
    }


def train_model_epoch(
    model, train_loader, test_loader, optimizer, criterion, device, scheduler
):
    """Run one training epoch, then a full evaluation pass on the test set.

    Side effects: updates model weights, steps ``scheduler`` on the epoch
    mAP, mutates ``GlobalVariables`` (mode, best_acc) and may write a
    checkpoint under ``models/``.

    Args:
        model: the classifier; called as ``model(inputs, age, sex)``.
        train_loader: DataLoader yielding dicts with "image", "age",
            "sex", "labels" tensors.
        test_loader: same batch schema as ``train_loader``.
        optimizer: optimizer over ``model``'s parameters.
        criterion: loss taking ``(outputs, labels)``.
        device: torch device the model lives on.
        scheduler: ReduceLROnPlateau configured with mode="max" (fed mAP).
    """
    # ---- Training phase ----
    GlobalVariables.mode = "train"
    model.train()
    running_loss = 0.0
    sample_count = 0
    batch_count = 0
    total_batches = len(train_loader)

    for batch in train_loader:
        inputs = batch["image"].to(device)
        age = batch["age"].to(device)
        sex = batch["sex"].to(device)
        labels = batch["labels"].to(device)

        optimizer.zero_grad()
        outputs = model(inputs, age, sex)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Accumulate the sample-weighted loss AND the sample count so the
        # logged epoch loss is a true per-sample average. (Bug fix: the
        # original divided the sample-weighted sum by the number of
        # batches, which over-states the loss by ~batch_size.)
        batch_size = inputs.size(0)
        running_loss += loss.item() * batch_size
        sample_count += batch_size

        batch_count += 1
        logger.info(
            f"Epoch {GlobalVariables.epoch}/{GlobalVariables.num_epochs} | Batch {batch_count}/{total_batches} | "
            f"Loss: {loss.item():.4f}"
        )

    # ---- Evaluation phase (runs after every epoch) ----
    GlobalVariables.mode = "test"
    model.eval()
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for batch in test_loader:
            inputs = batch["image"].to(device)
            age = batch["age"].to(device)
            sex = batch["sex"].to(device)
            labels = batch["labels"].to(device)
            outputs = model(inputs, age, sex)
            all_preds.extend(outputs.cpu().numpy())
            # Bug fix: ``labels`` lives on ``device``; calling .numpy() on
            # a CUDA tensor raises TypeError, so move it to CPU first.
            # (Also removed the debug print() calls that dumped raw arrays.)
            all_labels.extend(labels.cpu().numpy())

    # Compute epoch metrics and let the scheduler react to the mAP
    # (ReduceLROnPlateau was built with mode="max").
    metrics = calculate_metrics(all_labels, all_preds)
    map_score = metrics["mAP"]
    precision = metrics["precision"]
    recall = metrics["recall"]
    scheduler.step(map_score)

    logger.info(
        f"Epoch {GlobalVariables.epoch}/{GlobalVariables.num_epochs} | "
        f"Loss: {running_loss / max(sample_count, 1):.4f} | "
        f"mAP: {map_score:.4f}  | "
        f"Precision: {precision:.4f} | Recall: {recall:.4f}"
    )

    # Save the best model when mAP improves AND precision/recall clear the
    # configured floors; otherwise drop a periodic checkpoint every 5 epochs.
    if (
        map_score > GlobalVariables.best_acc
        and precision >= GlobalVariables.standard_prec
        and recall >= GlobalVariables.standard_rec
    ):
        GlobalVariables.best_acc = map_score
        torch.save(model.state_dict(), r"models/best_model.pth")
        logger.info("找到符合要求的最佳模型并已保存！")
    elif GlobalVariables.epoch % 5 == 0:
        torch.save(
            model.state_dict(),
            f"models/model_epoch_{GlobalVariables.epoch}.pth",
        )


def train_model():
    """Build datasets, model, loss, optimizer and scheduler, then train.

    Reads configuration from ``GlobalVariables`` and mutates it
    (data sizes, epoch counter, position cursors) as a side effect.
    """
    # Use the small sample dataset when GlobalVariables.sample is set,
    # otherwise the full dataset.
    data_dir = (
        GlobalVariables.sample_data_dir
        if GlobalVariables.sample
        else GlobalVariables.data_dir
    )
    excel_path = (
        GlobalVariables.sample_excel_path
        if GlobalVariables.sample
        else GlobalVariables.excel_path
    )

    # Initialize the preprocessor
    preprocessor = FundusPreprocessor(data_dir, excel_path)

    # Load the metadata table
    df = preprocessor.load_data()

    # Train/test split. NOTE(review): the original comment claimed this
    # "preserves time-series ordering", but train_test_split shuffles by
    # default — confirm whether shuffle=False was intended.
    train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)
    GlobalVariables.train_data_size = len(train_df)
    GlobalVariables.test_data_size = len(test_df)

    # Wrap the split frames in the project Dataset
    train_dataset = FundusDataset(train_df, preprocessor)
    test_dataset = FundusDataset(test_df, preprocessor)

    # num_workers=0 for Windows compatibility: Linux/Unix forks workers
    # cheaply, while Windows spawns new processes that re-import the whole
    # module (including this training script), which can recursively
    # re-launch training or break globals/path configuration (NameError).
    train_loader = DataLoader(
        train_dataset,
        batch_size=GlobalVariables.batch_size,
        shuffle=True,
        num_workers=0,
    )
    test_loader = DataLoader(
        test_dataset, batch_size=GlobalVariables.batch_size, num_workers=0
    )

    # Put the model on GPU when available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = DualFundusClassifier().to(device)

    # BCELoss (not BCEWithLogitsLoss) because the model output already
    # includes a Sigmoid. Class weights are recomputed from the current
    # training split on every run.
    count_of_ones = train_df[GlobalVariables.label_columns].eq(1).sum().values
    total_count = count_of_ones.sum()
    
    # Laplace-style smoothing (+1 in the numerator, +num_labels in the
    # denominator) prevents a zero weight for labels with no positives.
    # NOTE(review): these weights are PROPORTIONAL to label frequency, so
    # frequent labels get the LARGER weight — class-imbalance weighting is
    # usually the inverse; confirm this is intentional.
    class_weights = (count_of_ones + 1) / (total_count + len(count_of_ones))
    class_weights = torch.tensor(class_weights).float()
    logger.info(f"动态计算类别权重: {class_weights.numpy()}")
    criterion = BCELoss(weight=class_weights.to(device))

    # Optimizer performs the actual parameter updates via optimizer.step()
    optimizer = Adam(
        model.parameters(),
        lr=GlobalVariables.learning_rate,
        weight_decay=5e-5,
    )

    # Shrink the LR by factor 0.3 when the monitored metric (mAP, passed
    # in train_model_epoch with mode="max") plateaus for 5 epochs.
    scheduler = ReduceLROnPlateau(
        optimizer,
        mode="max",
        factor=0.3,
        patience=5,
    )

    for epoch in range(GlobalVariables.num_epochs):
        GlobalVariables.epoch += 1
        # Reset the per-epoch data cursors — presumably consumed by the
        # preprocessing/dataset code; TODO confirm against those modules.
        GlobalVariables.train_current_pos = 0
        GlobalVariables.test_current_pos = 0
        train_model_epoch(
            model, train_loader, test_loader, optimizer, criterion, device, scheduler
        )


if __name__ == "__main__":
    # In sampling mode, wipe any stale sample output so the run starts clean.
    sample_dir = GlobalVariables.sample_save_dir
    if GlobalVariables.sample and path.exists(sample_dir):
        shutil.rmtree(sample_dir)
    # Deterministic cuDNN kernels (benchmark off) trade some speed for
    # reproducible results and steadier memory use.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    train_model()
