import os
import time
import torch
import argparse
import pandas as pd
import torch.nn as nn
import matplotlib.pyplot as plt

from model.Vgg import VGG16
from model.ResNet import ResNet18
from model.SwinTransformer import SwinTransformer
from torch.optim.lr_scheduler import LinearLR, SequentialLR, CosineAnnealingLR


def parse_option():
    """Build the command-line parser and return the parsed (known) arguments.

    Unrecognized arguments are silently ignored via ``parse_known_args`` so the
    script tolerates extra flags injected by launchers/notebooks.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--model_type', default='SwinTransformer', type=str,
        choices=['SwinTransformer', 'Vgg16', 'ResNet18', 'EfficientNet'])
    add('--batch_size', type=int, default=64, help='batch-size')
    add('--num_workers', type=int, default=4, help='数据加载进程数')
    add('--epochs', type=int, default=20, help='epochs')
    add('--gpu', type=int, default=0, help='gpu id')
    add('--lr', type=float, default=0.001, help='learning rate')
    add('--weight_decay', type=float, default=1e-4, help='weight decay')

    known_args, _unknown = parser.parse_known_args()
    return known_args


def build_resnet18_tiny_imagenet():
    """Create a ResNet-18 classifier sized for Tiny ImageNet (200 classes)."""
    return ResNet18(num_classes=200)

def build_swin_tiny_imagenet(pretrained=False):
    """
    Build a Swin-Tiny model adapted to Tiny ImageNet (64x64 input, 200 classes)
    and run a set of sanity checks (shape, gradients, parameter count).

    Args:
        pretrained: accepted for interface compatibility, but pretrained weights
            are currently NOT loaded here — loading ImageNet weights would
            require handling size mismatches (different img_size/embed_dim).

    Returns:
        A configured ``SwinTransformer`` instance that passed the checks below.

    Raises:
        AssertionError: if the output shape, gradient flow, or parameter-count
            checks fail (developer sanity checks, not input validation).
    """
    # --- Core hyperparameters, scaled down from the standard Swin-T config ---
    model = SwinTransformer(
        img_size=64,  # matches Tiny ImageNet's 64x64 input
        patch_size=4,  # 4x4 patches -> 16x16 = 256 tokens
        in_chans=3,  # RGB input
        num_classes=200,  # Tiny ImageNet has 200 classes
        embed_dim=64,  # reduced embedding dim (standard Swin-T uses 96)
        depths=[1, 1, 2, 2],  # fewer blocks (standard: [2, 2, 6, 2])
        num_heads=[2, 4, 8, 16],  # fewer attention heads (standard: [3, 6, 12, 24])
        window_size=4,  # window size 4; 64/4 divides evenly at every stage
        mlp_ratio=2.0,  # MLP expansion ratio
        qkv_bias=True,  # keep QKV bias
        drop_rate=0.5,  # higher dropout to fight overfitting on a small dataset
        drop_path_rate=0.2,  # stochastic-depth probability
        ape=False,  # no absolute position embedding (small input size)
        patch_norm=True,  # keep patch normalization
        use_checkpoint=False  # no activation checkpointing (saves recompute, costs memory)
    )

    # --- Model validation (sanity checks) ---
    print("\n=== 模型结构验证 ===")

    # 1. Input/output shape check
    dummy_input = torch.randn(2, 3, 64, 64)
    output = model(dummy_input)
    print("输入形状:", dummy_input.shape)
    print("输出形状:", output.shape)  # expected: [2, 200]
    assert output.shape == (2, 200), f"输出形状错误，应为(2,200)，实际{output.shape}"

    # 2. Gradient-flow check: every parameter that received a gradient should
    #    have a non-vanishing mean absolute gradient.
    model.zero_grad()
    output.sum().backward()
    # .item() converts 0-dim tensors to plain floats for clean min/max/formatting.
    grad_means = [p.grad.abs().mean().item() for p in model.parameters() if p.grad is not None]
    print("梯度均值范围:", f"[{min(grad_means):.3e}, {max(grad_means):.3e}]")
    assert all(g > 1e-6 for g in grad_means), "存在梯度消失的层！"

    # 3. Parameter-count check (target: under 30M parameters)
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"\n总参数量: {total_params / 1e6:.2f}M")
    print(f"可训练参数量: {trainable_params / 1e6:.2f}M")
    assert total_params < 30e6, "模型过大，需进一步缩减！"

    return model


def build_vgg_tiny_imagenet(num_classes=200, dropout=0.4, in_channels=3, pretrained=False):
    """
    Build a VGG-16 model adapted to Tiny ImageNet and validate its output shape.

    Args:
        num_classes: number of output classes (Tiny ImageNet: 200).
        dropout: dropout probability used inside the classifier head.
        in_channels: number of input channels (3 for RGB).
        pretrained: accepted for interface compatibility, but pretrained weights
            are currently NOT loaded here.

    Returns:
        A configured ``VGG16`` instance that passed the shape check below.

    Raises:
        AssertionError: if the output shape check fails (developer sanity check).
    """
    model = VGG16(num_classes=num_classes, dropout=dropout, in_channels=in_channels)

    # --- Model validation (sanity check) ---
    print("\n=== 模型结构验证 ===")

    # 1. Input/output shape check. no_grad: we only need shapes, not a graph.
    dummy_input = torch.randn(64, 3, 64, 64)
    with torch.no_grad():
        output = model(dummy_input)
    print("输入形状:", dummy_input.shape)
    print("输出形状:", output.shape)  # expected: [64, 200]
    # NOTE: original message claimed (2,200); the dummy batch is 64, so the
    # expected shape is (64, 200) — message fixed to match the actual check.
    assert output.shape == (64, 200), f"输出形状错误，应为(64,200)，实际{output.shape}"

    return model


def train_process(model, train_loader, val_loader, epochs=20, gpu=0, lr=0.001, weight_decay=1e-4):
    """
    Generic supervised-training loop, reusable for most classification tasks.

    Args:
        model: the model to train (moved to the selected device in-place).
        train_loader: iterable of (inputs, targets) training batches.
        val_loader: iterable of (inputs, targets) validation batches.
        epochs: number of training epochs.
        gpu: CUDA device index to use (falls back to CPU if CUDA unavailable).
        lr: base learning rate (the classification head gets 10x this).
        weight_decay: AdamW weight decay.

    Returns:
        history: dict with per-epoch lists under the keys
            'train_loss', 'val_loss', 'train_acc', 'val_acc'.

    Side effects:
        Saves the best and final model weights under ./weights and the
        training history CSV under ./train_result.
    """
    device = torch.device(f"cuda:{gpu}" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Per-parameter-group learning rates: the classification head trains with a
    # 10x larger LR than the backbone (a common Transformer fine-tuning trick).
    params = []
    for name, param in model.named_parameters():
        if 'head' in name:
            params.append({'params': param, 'lr': lr * 10})
        else:
            params.append({'params': param, 'lr': lr})

    optimizer = torch.optim.AdamW(params, weight_decay=weight_decay)

    # Linear warmup followed by cosine decay. The warmup length is clamped so
    # the schedule stays valid even for short runs (the previous code created
    # CosineAnnealingLR with T_max <= 0 whenever epochs <= 5).
    warmup_epochs = 5 if epochs > 5 else max(1, epochs - 1)
    scheduler1 = LinearLR(optimizer, start_factor=0.01, total_iters=warmup_epochs)
    scheduler2 = CosineAnnealingLR(optimizer, T_max=max(1, epochs - warmup_epochs))
    scheduler = SequentialLR(optimizer, [scheduler1, scheduler2], [warmup_epochs])

    # Cross-entropy with label smoothing for regularization.
    criterion = nn.CrossEntropyLoss(label_smoothing=0.1)
    history = {'train_loss': [], 'val_loss': [], 'train_acc': [], 'val_acc': []}
    since = time.time()

    # Create the weights folder if it does not exist.
    os.makedirs('./weights', exist_ok=True)

    # Defined up-front: these are needed for the *final* save even when no
    # "best" checkpoint was ever recorded (previously this raised NameError).
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    model_name = model.__class__.__name__

    # Track the best model seen so far (by validation accuracy).
    best_val_acc = 0.0
    best_model_state = None

    for epoch in range(epochs):
        # --- Training phase ---
        model.train()
        running_loss, correct, total = 0.0, 0, 0

        for batch_idx, (inputs, targets) in enumerate(train_loader):
            inputs, targets = inputs.to(device), targets.to(device)

            # Forward + backward
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            optimizer.zero_grad()
            loss.backward()
            # Gradient clipping stabilizes early/high-LR training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()

            # Accumulate statistics
            running_loss += loss.item()
            _, predicted = outputs.max(1)
            correct += predicted.eq(targets).sum().item()
            total += targets.size(0)

            if batch_idx % 100 == 0:
                print(f"Epoch {epoch + 1}/{epochs} | Batch {batch_idx}/{len(train_loader)} "
                      f"| Loss: {loss.item():.4f} | LR: {scheduler.get_last_lr()[0]:.6f}")

        train_loss = running_loss / len(train_loader)
        train_acc = 100. * correct / total
        history['train_loss'].append(train_loss)
        history['train_acc'].append(train_acc)

        # --- Validation phase ---
        model.eval()
        val_loss, val_correct, val_total = 0.0, 0, 0

        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                val_loss += criterion(outputs, targets).item()

                _, predicted = outputs.max(1)
                val_correct += predicted.eq(targets).sum().item()
                val_total += targets.size(0)

        val_loss /= len(val_loader)
        val_acc = 100. * val_correct / val_total
        history['val_loss'].append(val_loss)
        history['val_acc'].append(val_acc)

        # Update the best snapshot. NOTE: state_dict().copy() only copies the
        # dict, not the tensors — later optimizer steps would mutate the saved
        # "best" weights in place. Clone every tensor for a true snapshot.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_model_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
            print(f"🎯 新的最佳模型！验证准确率: {val_acc:.2f}%")

        # Step the LR schedule once per epoch.
        scheduler.step()

        # Epoch summary
        print(f"\nEpoch {epoch + 1} Summary:")
        print(f"Train Loss: {train_loss:.4f} | Train Acc: {train_acc:.2f}%")
        print(f"Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.2f}%")
        print("-" * 50)

    # Save the best checkpoint (if any epoch improved on 0% accuracy).
    if best_model_state is not None:
        best_model_path = f"./weights/{model_name}_best_acc{best_val_acc:.1f}_{timestamp}.pth"

        torch.save({
            'model_state_dict': best_model_state,
            'val_accuracy': best_val_acc,
            'epochs': epochs,
            'timestamp': timestamp
        }, best_model_path)
        print(f"✅ 最佳模型已保存到: {best_model_path}")

    # Also save the final (last-epoch) model.
    final_model_path = f"./weights/{model_name}_final_{timestamp}.pth"
    torch.save(model.state_dict(), final_model_path)
    print(f"📁 最终模型已保存到: {final_model_path}")

    # Persist the training history as CSV under ./train_result.
    train_process_df = pd.DataFrame(history)
    train_process_df['epoch'] = range(1, epochs + 1)
    train_process_df.set_index('epoch', inplace=True)

    os.makedirs('./train_result', exist_ok=True)
    csv_path = f"./train_result/training_history_{timestamp}.csv"
    train_process_df.to_csv(csv_path)

    print(f"\nTraining completed in {time.time() - since:.0f}s")
    print(f"Training history saved to {csv_path}")

    return history

# --- 4. 结果可视化 ---
def plot_results(history, model_type):
    """Render loss and accuracy curves side-by-side and save the figure.

    The image is written to ./train_result with the model type and a
    timestamp in the filename.
    """
    # Make sure the output directory exists.
    os.makedirs('./train_result', exist_ok=True)

    plt.figure(figsize=(12, 4))

    # (subplot index, train series key, val series key, title, y-axis label)
    panels = [
        (1, 'train_loss', 'val_loss', 'Loss Curve', 'Loss'),
        (2, 'train_acc', 'val_acc', 'Accuracy Curve', 'Accuracy (%)'),
    ]
    for position, train_key, val_key, title, ylabel in panels:
        plt.subplot(1, 2, position)
        plt.plot(history[train_key], label='Train')
        plt.plot(history[val_key], label='Validation')
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.legend()

    plt.tight_layout()
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    # Save the figure to disk and release it.
    plt.savefig(f'./train_result/{model_type}_training_curves_{timestamp}.png', dpi=300, bbox_inches='tight')
    plt.close()