#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
PyTorch模型压缩示例 - CIFAR-10上的ResNet18
"""

import os
import argparse
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
import logging
import time
import copy
import sys

# Add the project root directory to the import path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

# Import project-local modules
from uamcf.methods.quantization import Quantization
from uamcf.adapters.pytorch import PyTorchAdapter
from uamcf.utils.visualizer import plot_compression_results

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("main")


def parse_args():
    """Build the CLI parser and return the parsed arguments.

    Returns:
        argparse.Namespace carrying the dataset/output paths plus the
        training and quantization settings (batch size, epochs, bit
        width, scheme, per-channel flag).
    """
    cli = argparse.ArgumentParser(description="PyTorch模型压缩示例")
    cli.add_argument("--data-dir", default="./data", help="数据集存放目录")
    cli.add_argument("--output-dir", default="./output", help="输出目录")
    cli.add_argument("--batch-size", type=int, default=128, help="批处理大小")
    cli.add_argument("--epochs", type=int, default=5, help="微调轮数")
    cli.add_argument("--bits", type=int, default=4, help="量化位宽")
    cli.add_argument(
        "--scheme",
        default="asymmetric",
        choices=["symmetric", "asymmetric"],
        help="量化方案",
    )
    cli.add_argument("--per-channel", action="store_true", help="是否按通道量化")
    parsed = cli.parse_args()
    return parsed


def prepare_model_for_cifar10(original_model, num_classes=10):
    """Adapt a torchvision ResNet-style model to CIFAR-10.

    Replaces the final fully-connected head so the classifier outputs
    ``num_classes`` logits while leaving every other (pretrained) layer
    untouched.

    Args:
        original_model: model exposing a ``fc`` Linear head (e.g. ResNet18).
        num_classes: number of output classes; defaults to 10 (CIFAR-10),
            so existing callers are unaffected.

    Returns:
        The same model instance, modified in place.
    """
    model = original_model
    # Keep the incoming feature width, only change the output dimension.
    in_features = model.fc.in_features
    model.fc = nn.Linear(in_features, num_classes)
    return model


def _run_train_epoch(model, loader, criterion, optimizer, device):
    """Run one optimization pass over ``loader``; return (mean loss, accuracy)."""
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    for inputs, labels in loader:
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    return running_loss / len(loader), correct / total


def _run_eval_epoch(model, loader, criterion, device):
    """Evaluate ``model`` over ``loader`` without gradients; return (mean loss, accuracy)."""
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0

    with torch.no_grad():
        for inputs, labels in loader:
            inputs, labels = inputs.to(device), labels.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            running_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    return running_loss / len(loader), correct / total


def finetune_model(model, train_loader, val_loader, epochs=5, device="cuda"):
    """Fine-tune ``model`` on CIFAR-10-style classification data.

    Trains with Adam (lr=1e-3) and StepLR(step_size=2, gamma=0.1) for
    ``epochs`` epochs, tracks validation accuracy after every epoch, and
    restores the best-performing weights before returning.

    Args:
        model: network to fine-tune; it is moved to ``device``.
        train_loader: DataLoader yielding (inputs, labels) training batches.
        val_loader: DataLoader yielding (inputs, labels) validation batches.
        epochs: number of fine-tuning epochs.
        device: torch device string (e.g. "cuda" or "cpu").

    Returns:
        The model in eval mode, loaded with the best validation weights.
    """
    # Same logger instance as the module-level `logger` ("main").
    log = logging.getLogger("main")

    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)

    best_val_acc = 0.0
    best_state = None

    log.info("开始微调模型...")
    for epoch in range(epochs):
        train_loss, train_acc = _run_train_epoch(model, train_loader, criterion, optimizer, device)
        val_loss, val_acc = _run_eval_epoch(model, val_loader, criterion, device)

        log.info(
            f"Epoch {epoch + 1}/{epochs}: Train Loss={train_loss:.4f}, Train Acc={train_acc:.4f}, Val Loss={val_loss:.4f}, Val Acc={val_acc:.4f}")

        # Snapshot the best-performing weights so far.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_state = copy.deepcopy(model.state_dict())
            log.info(f"保存新的最佳模型，验证准确率: {val_acc:.4f}")

        scheduler.step()

    # `is not None`, not truthiness: an empty state_dict is falsy, which
    # would silently skip restoring weights for a parameterless model.
    if best_state is not None:
        model.load_state_dict(best_state)
        log.info(f"加载最佳微调模型，验证准确率: {best_val_acc:.4f}")

    model.eval()
    return model


def evaluate_accuracy(model, test_loader, device="cuda"):
    """Compute top-1 accuracy of ``model`` over ``test_loader``.

    Args:
        model: classifier returning per-class logits.
        test_loader: DataLoader yielding (inputs, labels) batches.
        device: torch device string (e.g. "cuda" or "cpu").

    Returns:
        float accuracy in [0, 1]; 0.0 for an empty loader (the original
        code raised ZeroDivisionError in that case).
    """
    model.eval()
    model = model.to(device)

    correct = 0
    total = 0

    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    # Guard the empty-dataset case instead of dividing by zero.
    accuracy = correct / total if total else 0.0
    # Same logger instance as the module-level `logger` ("main").
    logging.getLogger("main").info(f"测试集准确率: {accuracy:.4f}")
    return accuracy


def main():
    """End-to-end demo: fine-tune ResNet18 on CIFAR-10, quantize it with the
    project's Quantization method, then compare size, accuracy, and latency
    of the original vs. the compressed model and plot the results.
    """
    args = parse_args()

    # Device selection: prefer CUDA when available, fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Create the output directory.
    os.makedirs(args.output_dir, exist_ok=True)

    # Dataset pipelines: augmentation for training, plain normalization for test.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.Resize(224),  # ResNet18 expects 224x224 input
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])

    transform_test = transforms.Compose([
        transforms.Resize(224),  # ResNet18 expects 224x224 input
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])

    # Load the CIFAR-10 dataset (downloaded on first run).
    trainset = datasets.CIFAR10(root=args.data_dir, train=True, download=True, transform=transform_train)
    testset = datasets.CIFAR10(root=args.data_dir, train=False, download=True, transform=transform_test)

    # Split the training set 80/20 to create a validation set.
    train_size = int(0.8 * len(trainset))
    val_size = len(trainset) - train_size
    trainset, valset = torch.utils.data.random_split(trainset, [train_size, val_size])

    # Build the data loaders.
    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)
    val_loader = DataLoader(valset, batch_size=args.batch_size, shuffle=False, num_workers=2)
    test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)

    # Load the ImageNet-pretrained ResNet18 model.
    print("加载resnet18模型...")
    original_model = models.resnet18(pretrained=True)

    # Adapt the head to 10 classes and fine-tune on CIFAR-10.
    print("准备并微调模型用于CIFAR-10...")
    cifar10_model = prepare_model_for_cifar10(original_model)
    cifar10_model = finetune_model(cifar10_model, train_loader, val_loader, epochs=args.epochs, device=device)

    # Evaluate the fine-tuned (uncompressed) model as the baseline.
    print("评估原始微调模型...")
    original_accuracy = evaluate_accuracy(cifar10_model, test_loader, device=device)

    # Persist the fine-tuned weights.
    torch.save(cifar10_model.state_dict(), os.path.join(args.output_dir, "resnet18_finetuned.pth"))
    print(f"微调模型已保存到: {os.path.join(args.output_dir, 'resnet18_finetuned.pth')}")

    # Compress the model.
    print("压缩模型...")
    adapter = PyTorchAdapter(device=device)

    # Quantization configuration driven by the CLI flags.
    quantization_config = {
        "bits": args.bits,
        "scheme": args.scheme,
        "per_channel": args.per_channel
    }

    # Instantiate the quantization method directly.
    quantization = Quantization(quantization_config)

    # Rebuild a fresh copy of the model so quantization does not touch the
    # baseline; load the fine-tuned weights into it.
    compressed_model = models.resnet18(pretrained=False)
    compressed_model = prepare_model_for_cifar10(compressed_model)
    compressed_model.load_state_dict(cifar10_model.state_dict())
    compressed_model = compressed_model.to(device)

    # Apply quantization layer by layer to every Conv2d/Linear module.
    print("正在对模型进行量化...")
    for name, module in compressed_model.named_modules():
        if isinstance(module, (nn.Conv2d, nn.Linear)):
            # Skip the final fully-connected layer to preserve fine-tuning quality.
            if name != "fc":
                # NOTE(review): Quantization.compress is a project API; assumed
                # to return a module flagged with `_quantized` on success —
                # confirm against uamcf.methods.quantization.
                quantized_module = quantization.compress(module)
                if hasattr(quantized_module, "_quantized"):
                    # Swap the layer in place via the adapter's replace helper.
                    compressed_model = adapter.replace_layer(compressed_model, name, quantized_module)
                    print(f"成功量化层: {name}")

    # Evaluate the compressed model.
    print("评估压缩模型...")
    compressed_accuracy = evaluate_accuracy(compressed_model, test_loader, device=device)

    # Persist the compressed weights.
    torch.save(compressed_model.state_dict(), os.path.join(args.output_dir, "resnet18_compressed.pth"))
    print(f"压缩模型已保存到: {os.path.join(args.output_dir, 'resnet18_compressed.pth')}")

    # Model sizes (adapter returns bytes — presumably summed parameter/buffer
    # storage; verify against PyTorchAdapter.get_model_size).
    original_size_bytes = adapter.get_model_size(cifar10_model)
    compressed_size_bytes = adapter.get_model_size(compressed_model)

    original_size_mb = original_size_bytes / (1024 * 1024)
    compressed_size_mb = compressed_size_bytes / (1024 * 1024)

    # Inference latency on a single sample (adapter presumably reports
    # milliseconds, matching the prints below — confirm).
    print("测量推理延迟...")
    sample_input = next(iter(test_loader))[0][:1].to(device)
    original_latency = adapter.measure_latency(cifar10_model, sample_input)
    compressed_latency = adapter.measure_latency(compressed_model, sample_input)

    # Print the comparison summary.
    print("\n========== 压缩结果 ==========")
    print(f"原始模型大小: {original_size_mb:.2f} MB")
    print(f"压缩模型大小: {compressed_size_mb:.2f} MB")
    print(f"压缩比例: {original_size_mb / compressed_size_mb:.2f}x")
    print(f"原始模型准确率: {original_accuracy:.4f}")
    print(f"压缩模型准确率: {compressed_accuracy:.4f}")
    print(f"准确率变化: {(compressed_accuracy - original_accuracy) * 100:+.2f}%")
    print(f"原始模型延迟: {original_latency:.2f} ms")
    print(f"压缩模型延迟: {compressed_latency:.2f} ms")
    print(f"延迟变化: {original_latency / compressed_latency:.2f}x")
    print("==============================\n")

    # Results dictionary in the shape expected by plot_compression_results.
    comparison_results = {
        'original': {
            'size_bytes': original_size_bytes,
            'param_count': adapter.get_param_count(cifar10_model),
            'accuracy': original_accuracy,
            'latency_ms': original_latency
        },
        'compressed': {
            'size_bytes': compressed_size_bytes,
            'param_count': adapter.get_param_count(compressed_model),
            'accuracy': compressed_accuracy,
            'latency_ms': compressed_latency
        },
        'improvements': {
            'size_reduction': original_size_mb / compressed_size_mb,
            'param_reduction': 1.0,  # quantization does not change the parameter count
            'accuracy_change': compressed_accuracy - original_accuracy,
            'latency_improvement': original_latency / compressed_latency
        }
    }

    # Create the visualization directory.
    vis_dir = os.path.join(args.output_dir, "visualization")
    os.makedirs(vis_dir, exist_ok=True)

    # Generate the visualization plots.
    print("生成可视化结果...")
    vis_success = plot_compression_results(comparison_results, vis_dir)

    if vis_success:
        print(f"可视化结果已保存到: {vis_dir}")
    else:
        print("生成可视化失败，请确保安装了matplotlib库")

    print("压缩示例完成!")


if __name__ == "__main__":
    main()