import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
import numpy as np


def train_with_validation(model,
                          X_train, y_train,
                          X_val, y_val,
                          X_test, y_test,
                          num_epochs=100,
                          batch_size=32,
                          lr=0.001,
                          weight_decay=1e-4,
                          save_path="best_model.pth",
                          patience=3,
                          enable_confidence_weight=True
                          ):
    """Train `model` with validation-based early stopping and checkpointing.

    Each epoch: train on (X_train, y_train), evaluate on the validation and
    test splits, and save the model whenever validation accuracy reaches a
    new best. Training stops early after `patience` consecutive epochs
    without validation improvement. The best checkpoint is reloaded before
    the final test evaluation.

    Args:
        model: torch.nn.Module classifier producing per-class logits.
        X_train, y_train, X_val, y_val, X_test, y_test: arrays consumed by
            the project-local `CSIDataset` (see Dateset_1).
        num_epochs: maximum number of training epochs.
        batch_size: mini-batch size for all three loaders.
        lr: Adam learning rate.
        weight_decay: L2 penalty passed to Adam.
        save_path: file path for the best-model state_dict checkpoint.
        patience: epochs without val-accuracy improvement before early stop.
        enable_confidence_weight: if True, use confidence-weighted
            cross-entropy for training; otherwise plain cross-entropy.

    Returns:
        The model with the best checkpoint's weights loaded.
    """
    # Device selection
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    def confidence_weighted_loss(outputs, labels, alpha=0.1):
        """Cross-entropy with higher weight on low-confidence samples.

        BUG FIX: the original multiplied the *mean-reduced* (scalar) loss by
        normalized per-sample weights, which — because the weights are
        normalized to mean 1 — made the weighting a mathematical no-op.
        Per-sample losses (reduction='none') are required for the weights
        to have any effect.
        """
        probs = F.softmax(outputs, dim=1)
        # squeeze(1), not squeeze(): a size-1 final batch must stay 1-D.
        confidences = torch.gather(probs, 1, labels.unsqueeze(1)).squeeze(1)
        weights = 1.0 - confidences + alpha  # low-confidence samples weigh more
        weights = weights / weights.mean()   # normalize to mean 1
        # NOTE(review): weights are not detached, so gradients also flow
        # through the softmax confidences — matches the original intent.
        base_loss = F.cross_entropy(outputs, labels, reduction="none")
        return (base_loss * weights).mean()

    # Data loading (CSIDataset is project-local)
    from Dateset_1 import CSIDataset
    train_dataset = CSIDataset(X_train, y_train, is_train=True)
    val_dataset = CSIDataset(X_val, y_val, is_train=False)
    test_dataset = CSIDataset(X_test, y_test, is_train=False)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=0)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=0)

    # Loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

    def _evaluate(loader):
        """Return (avg_loss, accuracy%) of `model` over `loader`, no grad."""
        model.eval()
        total_loss, correct, total = 0.0, 0, 0
        with torch.no_grad():
            for X_batch, y_batch in loader:
                X_batch, y_batch = X_batch.to(device), y_batch.to(device)
                outputs = model(X_batch)
                total_loss += criterion(outputs, y_batch).item() * X_batch.size(0)
                preds = outputs.argmax(dim=1)
                total += y_batch.size(0)
                correct += (preds == y_batch).sum().item()
        return round(total_loss / total, 4), round(100 * correct / total, 2)

    # Training-progress tracking
    best_val_acc = 0.0
    best_epoch = 0
    early_stop_counter = 0

    # Header (fine-tuning metrics removed from the original layout)
    print(f"使用设备: {device}")
    print(f"数据规模：训练集{len(X_train)} | 验证集{len(X_val)} | 测试集{len(X_test)}")
    print("=" * 160)
    print(
        f"{'轮次':<6} | {'训练Loss':<10} | {'训练Acc':<10} | {'验证Loss':<10} | {'验证Acc':<10} | "
        f"{'测试Acc':<10} | {'最佳轮次':<8} | {'早停计数':<8}"
    )
    print("-" * 160)

    for epoch in range(num_epochs):
        # 1. Training phase
        model.train()
        train_total_loss, train_correct, train_total = 0.0, 0, 0
        for X_batch, y_batch in train_loader:
            X_batch, y_batch = X_batch.to(device), y_batch.to(device)
            optimizer.zero_grad()
            outputs = model(X_batch)
            # Pick the loss (confidence-weighted or plain cross-entropy)
            if enable_confidence_weight:
                loss = confidence_weighted_loss(outputs, y_batch)
            else:
                loss = criterion(outputs, y_batch)
            loss.backward()
            optimizer.step()

            # Accumulate training metrics (loss weighted by batch size)
            train_total_loss += loss.item() * X_batch.size(0)
            preds = outputs.argmax(dim=1)
            train_total += y_batch.size(0)
            train_correct += (preds == y_batch).sum().item()
        train_avg_loss = round(train_total_loss / train_total, 4)
        train_acc = round(100 * train_correct / train_total, 2)

        # 2. Validation phase
        val_avg_loss, val_acc = _evaluate(val_loader)

        # 3. Test-set evaluation (raw test only, no fine-tuning)
        _, test_acc = _evaluate(test_loader)

        # 4. Early stopping and checkpointing
        current_best = ""
        if val_acc >= best_val_acc:
            best_val_acc = val_acc
            best_epoch = epoch + 1
            early_stop_counter = 0
            torch.save(model.state_dict(), save_path)
            current_best = f"{best_epoch}"
        else:
            early_stop_counter += 1

        # 5. Per-epoch report
        print(
            f"{epoch + 1:<6} | {train_avg_loss:<10} | {train_acc:<10} | "
            f"{val_avg_loss:<10} | {val_acc:<10} | {test_acc:<10} | "
            f"{current_best:<8} | {early_stop_counter}/{patience}"
        )

        if early_stop_counter >= patience:
            print(f"\n早停触发！连续{patience}轮验证准确率未提升，停止训练")
            break

    # Final evaluation with the best checkpoint (same device/process,
    # so no map_location is needed here)
    model.load_state_dict(torch.load(save_path))
    _, final_test_acc = _evaluate(test_loader)

    print("=" * 160)
    print(f"训练完成！")
    print(f"最佳验证准确率: {best_val_acc:.2f}%（第{best_epoch}轮）")
    print(f"最终测试准确率: {final_test_acc:.2f}%")
    print("=" * 160)
    return model
