import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import os
import json
from model import EarlyStopping
from config import DATA_DIR, BEST_MODEL_PATH, BEST_MODEL_JIT_PATH
import shutil


def evaluate_model(model, data_loader, criterion, device):
    """Evaluate a model over an entire data loader.

    Switches the model to eval mode, disables gradient tracking, and
    accumulates the per-batch loss plus top-1 prediction hits.

    Args:
        model: network to evaluate.
        data_loader: iterable of (images, labels) batches.
        criterion: loss function applied per batch.
        device: device the batches are moved to before the forward pass.

    Returns:
        tuple: (mean batch loss, accuracy as a percentage in [0, 100]).
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    with torch.no_grad():
        for batch_images, batch_labels in data_loader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            logits = model(batch_images)
            loss_sum += criterion(logits, batch_labels).item()
            # Top-1 prediction per sample.
            predictions = logits.argmax(dim=1)
            n_seen += batch_labels.size(0)
            n_correct += (predictions == batch_labels).sum().item()

    return loss_sum / len(data_loader), 100 * n_correct / n_seen


def plot_training_history(run_dir, train_losses, train_accs, val_accs):
    """Render loss/accuracy curves and save them as a PNG inside run_dir.

    Writes "training_history.png" (300 dpi) into run_dir and closes the
    figure afterwards so repeated calls don't leak figures.
    """
    # Use the SimHei font so CJK axis labels render correctly.
    plt.rcParams["font.sans-serif"] = ["SimHei"]
    plt.rcParams["axes.unicode_minus"] = False

    fig = plt.figure(figsize=(12, 4))

    # Left panel: per-epoch training loss.
    ax_loss = fig.add_subplot(1, 2, 1)
    ax_loss.plot(train_losses, label="训练损失")
    ax_loss.set_title("训练过程中的损失变化", fontsize=12)
    ax_loss.set_xlabel("训练轮次", fontsize=10)
    ax_loss.set_ylabel("损失值", fontsize=10)
    ax_loss.legend(loc="best", fontsize=10)
    ax_loss.grid(True, linestyle="--", alpha=0.6)

    # Right panel: training vs. validation accuracy.
    ax_acc = fig.add_subplot(1, 2, 2)
    ax_acc.plot(train_accs, label="训练准确率")
    ax_acc.plot(val_accs, label="验证准确率")
    ax_acc.set_title("模型准确率变化", fontsize=12)
    ax_acc.set_xlabel("训练轮次", fontsize=10)
    ax_acc.set_ylabel("准确率 (%)", fontsize=10)
    ax_acc.legend(loc="best", fontsize=10)
    ax_acc.grid(True, linestyle="--", alpha=0.6)

    fig.tight_layout()
    fig.savefig(
        os.path.join(run_dir, "training_history.png"),
        dpi=300,
        bbox_inches="tight",
    )
    plt.close(fig)


def save_model(model, epoch, optimizer, loss, accuracy, filepath, run_dir):
    """Save a training checkpoint, plus a JIT-traced copy for best models.

    Writes a dict checkpoint (epoch, model/optimizer state dicts, loss,
    accuracy) to `filepath`. When `filepath` contains "best_model", also
    traces the model with a dummy MNIST-shaped input (1, 1, 28, 28),
    saves the traced module next to the checkpoint, and copies both
    files to the project-wide BEST_MODEL_PATH / BEST_MODEL_JIT_PATH.

    Args:
        model: network whose weights are saved; switched to eval mode
            before tracing (side effect on the best-model path).
        epoch: epoch index stored in the checkpoint.
        optimizer: optimizer whose state dict is saved.
        loss: loss value stored in the checkpoint.
        accuracy: accuracy value stored in the checkpoint.
        filepath: destination path for the checkpoint (.pth).
        run_dir: kept for interface compatibility with callers; not used
            here (the JIT file is derived from `filepath` directly).
    """
    # Bug fix: os.path.dirname returns "" for a bare filename, and
    # os.makedirs("") raises FileNotFoundError — only create the parent
    # directory when the path actually has one.
    parent_dir = os.path.dirname(filepath)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    # Regular checkpoint: full training state for resuming.
    torch.save(
        {
            "epoch": epoch,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
            "loss": loss,
            "accuracy": accuracy,
        },
        filepath,
    )

    # For best models, additionally save a JIT-traced version for
    # dependency-free deployment.
    if "best_model" in filepath:
        try:
            print("正在保存JIT编译的模型...")
            model.eval()
            # Trace on the same device the model lives on.
            dummy_input = torch.randn(1, 1, 28, 28).to(next(model.parameters()).device)
            scripted_model = torch.jit.trace(model, dummy_input)

            # Save the JIT model alongside the checkpoint in run_dir.
            jit_filepath = filepath.replace(".pth", "_jit.pth")
            scripted_model.save(jit_filepath)

            # Mirror the latest best model to the runs root directory.
            shutil.copy2(filepath, BEST_MODEL_PATH)
            shutil.copy2(jit_filepath, BEST_MODEL_JIT_PATH)

            print("最佳模型已保存")

        except Exception as e:
            # Best-effort: tracing can fail (e.g. data-dependent control
            # flow); the regular checkpoint above is already on disk.
            print(f"保存JIT编译模型时出错: {str(e)}")
            print("仅保存了常规模型检查点")


def get_data_loaders(batch_size):
    """Build the MNIST train/test DataLoaders.

    The training split gets light random-affine augmentation; both splits
    are normalized with the standard MNIST mean/std. Datasets are
    downloaded into DATA_DIR on first use.

    Args:
        batch_size: batch size for both loaders.

    Returns:
        tuple: (train_loader with shuffling, test_loader without).
    """
    # Shared normalization with the canonical MNIST statistics.
    normalize = transforms.Normalize((0.1307,), (0.3081,))

    train_transform = transforms.Compose([
        transforms.RandomAffine(degrees=5, translate=(0.1, 0.1), scale=(0.9, 1.1)),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([transforms.ToTensor(), normalize])

    train_loader = DataLoader(
        datasets.MNIST(root=DATA_DIR, train=True, download=True, transform=train_transform),
        batch_size=batch_size,
        shuffle=True,
        num_workers=0,
        pin_memory=True,
    )
    test_loader = DataLoader(
        datasets.MNIST(root=DATA_DIR, train=False, download=True, transform=test_transform),
        batch_size=batch_size,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )

    return train_loader, test_loader


def get_training_config(optimizer, epochs, steps_per_epoch):
    """Assemble the per-run training helpers.

    Args:
        optimizer: optimizer the scheduler will drive.
        epochs: total number of training epochs.
        steps_per_epoch: optimizer steps per epoch (for OneCycleLR).

    Returns:
        tuple: (OneCycleLR scheduler peaking at lr=0.01, cross-entropy
        criterion, EarlyStopping watcher that maximizes its metric).
    """
    one_cycle = optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=0.01,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        pct_start=0.3,
        div_factor=25,
        final_div_factor=1000,
    )

    return (
        one_cycle,
        nn.CrossEntropyLoss(),
        EarlyStopping(patience=5, min_delta=0.2, mode="max"),
    )


def update_training_history(train_loss, train_accuracy, val_accuracy, run_dir,
                            train_losses, train_accs, val_accs):
    """Append this epoch's metrics in place and persist the history as JSON.

    Mutates the three history lists, then rewrites
    run_dir/training_history.json with the full series.
    """
    for series, value in (
        (train_losses, train_loss),
        (train_accs, train_accuracy),
        (val_accs, val_accuracy),
    ):
        series.append(value)

    history_path = os.path.join(run_dir, "training_history.json")
    with open(history_path, "w") as f:
        json.dump(
            {
                "train_losses": train_losses,
                "train_accs": train_accs,
                "val_accs": val_accs,
            },
            f,
        )


def save_checkpoints(model, optimizer, epoch, loss, accuracy, run_dir):
    """Persist periodic and best-so-far checkpoints for this run.

    Saves a snapshot every 5th epoch (epoch is 0-based, so epochs 4, 9,
    ...), and rewrites run_dir/best_model.pth whenever accuracy beats the
    best value seen so far in this process.
    """
    # Periodic snapshot every 5 epochs.
    if (epoch + 1) % 5 == 0:
        periodic_path = os.path.join(run_dir, f"checkpoint_epoch_{epoch+1}.pth")
        save_model(model, epoch, optimizer, loss, accuracy, periodic_path, run_dir)

    # Best-so-far accuracy is stored on the function object itself, so it
    # persists across calls within the same process (starts at 0).
    best_so_far = getattr(save_checkpoints, "best_accuracy", 0)
    if accuracy > best_so_far:
        save_checkpoints.best_accuracy = accuracy
        best_path = os.path.join(run_dir, "best_model.pth")
        save_model(model, epoch, optimizer, loss, accuracy, best_path, run_dir)


def print_epoch_results(epoch, train_loss, train_accuracy, val_loss, val_accuracy, optimizer):
    """Print a formatted summary of one epoch's metrics to stdout.

    Reads the current learning rate from the optimizer's first param group.
    """
    current_lr = optimizer.param_groups[0]["lr"]
    summary_lines = [
        f"\n第 {epoch+1} 轮结果:",
        f"训练损失: {train_loss:.4f} | 训练准确率: {train_accuracy:.2f}%",
        f"验证损失: {val_loss:.4f} | 验证准确率: {val_accuracy:.2f}%",
        f"学习率: {current_lr:.1e}",
        "-" * 80,
    ]
    for line in summary_lines:
        print(line)


def finalize_training(model, optimizer, criterion, test_loader, device, run_dir,
                     train_losses, train_accs, val_accs):
    """Wrap up a training run: plot history, then report final test metrics.

    Saves the training-history figure into run_dir, evaluates the model
    on the test loader, and prints the resulting loss and accuracy.
    """
    plot_training_history(run_dir, train_losses, train_accs, val_accs)
    test_loss, test_accuracy = evaluate_model(model, test_loader, criterion, device)
    # Single write keeps the output byte-identical to separate prints.
    print(
        f"\n最终测试结果:\n"
        f"损失: {test_loss:.4f}\n"
        f"准确率: {test_accuracy:.2f}%"
    )