#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
YOLO模型压缩示例 - 使用UAMCF框架压缩YOLOv5模型
"""

import os
import argparse
import torch
import torch.nn as nn
import logging
import time
import copy
import sys
import yaml
from pathlib import Path

# Add the project root to sys.path so the uamcf imports below resolve
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

# Project-local modules
from uamcf.methods.quantization import Quantization
from uamcf.methods.pruning import Pruning
from uamcf.adapters.pytorch import PyTorchAdapter
from uamcf.utils.visualizer import plot_compression_results

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("main")


def parse_args():
    """Build the command-line interface for this example and parse sys.argv."""
    parser = argparse.ArgumentParser(description="YOLO模型压缩示例")

    # Model, data and run options.
    parser.add_argument("--model-path", required=True, help="YOLO模型权重路径")
    parser.add_argument("--data-path", required=True, help="数据集配置文件路径")
    parser.add_argument("--output-dir", default="./output", help="输出目录")
    parser.add_argument("--batch-size", type=int, default=16, help="批处理大小")
    parser.add_argument("--img-size", type=int, default=640, help="图像大小")
    parser.add_argument("--epochs", type=int, default=5, help="微调轮数")

    # Compression options.
    parser.add_argument(
        "--method",
        choices=["quantization", "pruning", "both"],
        default="quantization",
        help="压缩方法",
    )
    parser.add_argument("--bits", type=int, default=8, help="量化位宽")
    parser.add_argument(
        "--scheme",
        default="asymmetric",
        choices=["symmetric", "asymmetric"],
        help="量化方案",
    )
    parser.add_argument("--per-channel", action="store_true", help="是否按通道量化")
    parser.add_argument("--sparsity", type=float, default=0.5, help="剪枝稀疏度")

    return parser.parse_args()


def load_yolo_model(model_path):
    """Load a YOLOv5 model from *model_path*.

    Requires the YOLOv5 repository to be checked out at ./yolov5 so that
    ``models.experimental.attempt_load`` can be imported.  Exits the
    process with code 1 if the repo is missing or loading fails.
    """
    logger.info(f"加载YOLO模型: {model_path}")
    try:
        # YOLOv5 is expected as a sibling checkout, not an installed
        # package, so its repo root must be on sys.path before importing.
        # (The original re-imported sys here, shadowing the module-level
        # import; `sys` is already available from the top of the file.)
        sys.path.append('./yolov5')
        from models.experimental import attempt_load

        model = attempt_load(model_path)
        logger.info(f"成功加载YOLO模型")
        return model
    except ImportError:
        logger.error("未找到YOLOv5仓库。请确保YOLOv5仓库在./yolov5目录下")
        logger.info("您可以使用以下命令克隆仓库: git clone https://github.com/ultralytics/yolov5")
        sys.exit(1)
    except Exception as e:
        logger.error(f"加载模型时出错: {str(e)}")
        sys.exit(1)


def prepare_dataloader(data_path, batch_size=16, img_size=640):
    """Build a YOLOv5 validation dataloader from a dataset YAML config.

    Exits the process with code 1 if the config cannot be read or the
    dataloader cannot be constructed.
    """
    logger.info(f"准备数据加载器: {data_path}")
    try:
        # Read the dataset description (paths, class names, ...).
        with open(data_path, 'r') as cfg_file:
            dataset_cfg = yaml.safe_load(cfg_file)

        # YOLOv5 helpers for dataset validation and loader construction.
        from uamcf.models.yolov5.utils.datasets import create_dataloader
        from uamcf.models.yolov5.utils.general import check_dataset

        # Normalize/verify the dataset configuration.
        dataset_cfg = check_dataset(dataset_cfg)

        # create_dataloader returns (loader, dataset); only the loader is needed.
        val_loader, *_ = create_dataloader(
            dataset_cfg['val'], img_size, batch_size,
            stride=32, pad=0.5, rect=True, workers=8,
        )

        logger.info(f"成功创建验证数据加载器，包含 {len(val_loader.dataset)} 个样本")
        return val_loader

    except Exception as e:
        logger.error(f"准备数据加载器时出错: {str(e)}")
        sys.exit(1)


def evaluate_model(model, val_loader, device):
    """Evaluate a YOLO model on *val_loader*.

    Returns a dict with keys 'mAP50', 'mAP' and 'inference_time'
    (average per-batch forward-pass time in milliseconds).  The mAP
    values here are simplified placeholders — a real evaluation should
    use YOLOv5's full metrics pipeline (val.py / ap_per_class).
    On failure, logs the error and returns all-zero metrics.
    """
    logger.info("评估模型性能...")
    try:
        # Import only the YOLOv5 helpers actually used below
        # (the original also imported ap_per_class, check_img_size,
        # scale_coords and xyxy2xywh without using them).
        from yolov5.utils.general import non_max_suppression
        from yolov5.utils.torch_utils import time_synchronized

        model.eval()
        model = model.to(device)

        # Evaluation bookkeeping.
        stats = []
        seen = 0
        iouv = torch.linspace(0.5, 0.95, 10).to(device)  # IoU thresholds for mAP@0.5:0.95
        niou = iouv.numel()

        total_inference_time = 0.0  # accumulated forward-pass time (seconds)
        num_batches = 0

        t1 = time.time()
        for batch_i, (img, targets, paths, shapes) in enumerate(val_loader):
            img = img.to(device, non_blocking=True)
            img = img.float() / 255.0  # uint8 [0, 255] -> float [0.0, 1.0]
            targets = targets.to(device)

            with torch.no_grad():
                # Time the forward pass; accumulate so the reported value
                # is an average (the original kept only the LAST batch's
                # time and raised NameError on an empty loader).
                t = time_synchronized()
                outputs = model(img)
                total_inference_time += time_synchronized() - t
                num_batches += 1

                # NMS
                outputs = non_max_suppression(outputs, conf_thres=0.001, iou_thres=0.65)

            # Per-image result processing.
            for si, pred in enumerate(outputs):
                labels = targets[targets[:, 0] == si, 1:]
                nl = len(labels)
                tcls = labels[:, 0].tolist() if nl else []  # ground-truth classes
                seen += 1

                # No predictions for this image: record empties if it has labels.
                if len(pred) == 0:
                    if nl:
                        stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                      torch.Tensor(), torch.Tensor(), tcls))
                    continue

                # Simplified bookkeeping — a full evaluation would match
                # predictions to labels by IoU here (see YOLOv5's val.py).
                stats.append((torch.zeros(0, niou, dtype=torch.bool),
                              torch.Tensor(), torch.Tensor(), tcls))

        t2 = time.time()
        map50 = 0.0     # mAP@0.5 (placeholder)
        map50_95 = 0.0  # mAP@0.5:0.95 (placeholder; renamed — `map` shadowed the builtin)

        # Average per-batch inference time; 0.0 if the loader was empty.
        avg_inference_time = total_inference_time / num_batches if num_batches else 0.0

        logger.info(f"评估完成，耗时: {t2 - t1:.2f}s")
        logger.info(f"mAP@0.5: {map50:.4f}, mAP@0.5:0.95: {map50_95:.4f}")

        return {
            'mAP50': map50,
            'mAP': map50_95,
            'inference_time': avg_inference_time * 1000  # milliseconds
        }

    except Exception as e:
        logger.error(f"评估模型时出错: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return {'mAP50': 0, 'mAP': 0, 'inference_time': 0}


def get_model_size(model, adapter):
    """Return the model's size as reported by the framework adapter.

    The value is treated as a byte count by callers (converted to MB
    downstream).
    """
    return adapter.get_model_size(model)


def get_model_params(model, adapter):
    """Return the model's total parameter count via the framework adapter."""
    return adapter.get_parameter_count(model)


def finetune_yolo(model, data_path, img_size, batch_size, epochs, device):
    """Fine-tune a (compressed) YOLO model using YOLOv5's train() entry point.

    The current model weights are written to a temporary .pt checkpoint so
    YOLOv5's trainer can load them via ``opt.weights``.  On any failure the
    original *model* is returned unchanged.
    """
    logger.info(f"微调YOLO模型，轮数: {epochs}")
    try:
        # YOLOv5 is a sibling checkout, not an installed package.
        # (The original re-imported sys here; it is already imported
        # at module level.)
        sys.path.append('./yolov5')
        from train import train

        # Build the option namespace YOLOv5's train() expects.
        opt = argparse.Namespace()
        opt.weights = ''  # set below to the temporary checkpoint
        opt.cfg = ''  # no model config: architecture comes from the checkpoint
        opt.data = data_path
        opt.hyp = './yolov5/data/hyps/hyp.finetune.yaml'  # fine-tuning hyperparameters
        opt.epochs = epochs
        opt.batch_size = batch_size
        opt.img_size = [img_size]
        opt.rect = False
        opt.resume = False
        opt.nosave = False
        opt.notest = False
        opt.noautoanchor = True
        opt.evolve = False
        opt.bucket = ''
        opt.cache_images = False
        opt.name = 'finetune'
        opt.device = device
        opt.multi_scale = False
        opt.single_cls = False
        opt.adam = True
        opt.sync_bn = False
        opt.local_rank = -1
        opt.workers = 8
        opt.project = 'runs/finetune'

        # Persist the current weights so train() can load them.
        temp_weight_path = './temp_model.pt'
        torch.save(model.state_dict(), temp_weight_path)
        opt.weights = temp_weight_path

        try:
            # Run fine-tuning.
            finetuned_model = train(opt)
        finally:
            # Always remove the temporary checkpoint — the original
            # leaked it whenever train() raised.
            if os.path.exists(temp_weight_path):
                os.remove(temp_weight_path)

        logger.info("微调完成")
        return finetuned_model

    except Exception as e:
        logger.error(f"微调模型时出错: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return model


def compress_yolo_model(model, method, quantization_config=None, pruning_config=None):
    """Compress a YOLO model via quantization, pruning, or both.

    Returns ``(original_model, compressed_model, adapter)`` where
    *original_model* is a deep copy taken before any compression is applied.
    """
    logger.info(f"使用方法 '{method}' 压缩YOLO模型")

    # Adapter bound to whatever device the model currently lives on.
    adapter = PyTorchAdapter(device=next(model.parameters()).device)

    # Keep an untouched copy for before/after comparison.
    original_model = copy.deepcopy(model)
    compressed_model = model

    if method in ("quantization", "both"):
        if quantization_config is None:
            quantization_config = {
                "bits": 8,
                "scheme": "asymmetric",
                "per_channel": False
            }
        quantizer = Quantization(quantization_config)
        logger.info(f"应用量化: {quantization_config}")
        # Quantize every eligible layer of the model.
        compressed_model = apply_compression_to_yolo(compressed_model, adapter, quantizer)

    if method in ("pruning", "both"):
        if pruning_config is None:
            pruning_config = {
                "sparsity": 0.5,
                "method": "magnitude",
                "granularity": "channel"
            }
        pruner = Pruning(pruning_config)
        logger.info(f"应用剪枝: {pruning_config}")
        # Prune every eligible layer of the (possibly already quantized) model.
        compressed_model = apply_compression_to_yolo(compressed_model, adapter, pruner)

    return original_model, compressed_model, adapter


def apply_compression_to_yolo(model, adapter, compression_method):
    """Apply *compression_method* to every Conv2d/Linear layer of a copy of *model*.

    A layer is swapped in via the adapter only when its compressed
    replacement carries a ``_quantized`` or ``_pruned`` marker attribute;
    replacement failures are logged and skipped.  The input model itself
    is never mutated.
    """
    result = copy.deepcopy(model)

    compressible_types = (nn.Conv2d, nn.Linear)
    for layer_name, layer in result.named_modules():
        # Skip anything that is not a compressible layer type.
        if not isinstance(layer, compressible_types):
            continue

        logger.info(f"对层 '{layer_name}' 应用压缩")
        new_layer = compression_method.compress(layer)

        # Only swap in layers the method actually marked as compressed.
        if hasattr(new_layer, "_quantized") or hasattr(new_layer, "_pruned"):
            try:
                adapter.replace_layer(result, layer_name, new_layer)
                logger.info(f"成功压缩层: {layer_name}")
            except Exception as e:
                logger.warning(f"替换层 '{layer_name}' 时出错: {str(e)}")

    return result


def main():
    """Entry point: load, compress, optionally fine-tune, evaluate, and report."""
    args = parse_args()

    # Select compute device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    # Ensure the output directory exists.
    os.makedirs(args.output_dir, exist_ok=True)

    # Load the YOLO model.
    model = load_yolo_model(args.model_path)
    model = model.to(device)

    # Build the validation dataloader.
    val_loader = prepare_dataloader(args.data_path, args.batch_size, args.img_size)

    # Baseline metrics before compression.
    logger.info("评估原始模型...")
    original_metrics = evaluate_model(model, val_loader, device)

    # Compression configuration from CLI flags.
    quantization_config = {
        "bits": args.bits,
        "scheme": args.scheme,
        "per_channel": args.per_channel
    }

    pruning_config = {
        "sparsity": args.sparsity,
        "method": "magnitude",
        "granularity": "channel"
    }

    # Compress the model.
    logger.info("开始压缩模型...")
    original_model, compressed_model, adapter = compress_yolo_model(
        model, args.method, quantization_config, pruning_config
    )

    # Evaluate the compressed model.
    logger.info("评估压缩后的模型...")
    compressed_metrics = evaluate_model(compressed_model, val_loader, device)

    # Optionally fine-tune to recover accuracy lost to compression.
    if args.epochs > 0:
        logger.info(f"微调压缩后的模型，轮数: {args.epochs}...")
        compressed_model = finetune_yolo(
            compressed_model, args.data_path, args.img_size,
            args.batch_size, args.epochs, device
        )

        # Re-evaluate after fine-tuning.
        logger.info("评估微调后的压缩模型...")
        compressed_metrics = evaluate_model(compressed_model, val_loader, device)

    # Persist the compressed weights.
    compressed_model_path = os.path.join(args.output_dir, "yolo_compressed.pt")
    torch.save(compressed_model.state_dict(), compressed_model_path)
    logger.info(f"压缩模型已保存到: {compressed_model_path}")

    # Size statistics.
    original_size_bytes = get_model_size(original_model, adapter)
    compressed_size_bytes = get_model_size(compressed_model, adapter)

    original_size_mb = original_size_bytes / (1024 * 1024)
    compressed_size_mb = compressed_size_bytes / (1024 * 1024)

    # Parameter counts.
    original_params = get_model_params(original_model, adapter)
    compressed_params = get_model_params(compressed_model, adapter)

    # Human-readable comparison.  Every ratio is guarded against division
    # by zero with max(...), matching comparison_results below — the
    # original crashed on the size ratio when the compressed size was 0.
    print("\n========== 压缩结果 ==========")
    print(f"原始模型大小: {original_size_mb:.2f} MB")
    print(f"压缩模型大小: {compressed_size_mb:.2f} MB")
    print(f"压缩比例: {original_size_bytes / max(1, compressed_size_bytes):.2f}x")
    print(f"原始模型参数数量: {original_params:,}")
    print(f"压缩模型参数数量: {compressed_params:,}")
    print(f"参数减少比例: {original_params / max(1, compressed_params):.2f}x")
    print(f"原始模型mAP@0.5: {original_metrics['mAP50']:.4f}")
    print(f"压缩模型mAP@0.5: {compressed_metrics['mAP50']:.4f}")
    print(f"mAP@0.5变化: {(compressed_metrics['mAP50'] - original_metrics['mAP50']) * 100:+.2f}%")
    print(f"原始模型推理时间: {original_metrics['inference_time']:.2f} ms")
    print(f"压缩模型推理时间: {compressed_metrics['inference_time']:.2f} ms")
    print(f"速度提升: {original_metrics['inference_time'] / max(0.1, compressed_metrics['inference_time']):.2f}x")
    print("==============================\n")

    # Results dict consumed by the visualizer.
    comparison_results = {
        'original': {
            'size_bytes': original_size_bytes,
            'param_count': original_params,
            'accuracy': original_metrics['mAP50'],  # mAP50 stands in for accuracy
            'latency_ms': original_metrics['inference_time']
        },
        'compressed': {
            'size_bytes': compressed_size_bytes,
            'param_count': compressed_params,
            'accuracy': compressed_metrics['mAP50'],
            'latency_ms': compressed_metrics['inference_time']
        },
        'improvements': {
            'size_reduction': original_size_bytes / max(1, compressed_size_bytes),
            'param_reduction': original_params / max(1, compressed_params),
            'accuracy_change': compressed_metrics['mAP50'] - original_metrics['mAP50'],
            'latency_improvement': original_metrics['inference_time'] / max(0.1, compressed_metrics['inference_time'])
        }
    }

    # Visualization output directory.
    vis_dir = os.path.join(args.output_dir, "visualization")
    os.makedirs(vis_dir, exist_ok=True)

    # Generate comparison plots.
    logger.info("生成可视化结果...")
    vis_success = plot_compression_results(comparison_results, vis_dir)

    if vis_success:
        logger.info(f"可视化结果已保存到: {vis_dir}")
    else:
        logger.warning("生成可视化失败")

    logger.info("YOLO模型压缩示例完成!")


if __name__ == "__main__":
    main()