#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
通用模型压缩工具 - 一行命令压缩任意模型

用法：
    python universal_compress.py --model_path MODEL_PATH [options]

示例：
    python universal_compress.py --model_path ./models/resnet18.pth --method quantization --bits 8
"""

import os
import sys
import argparse
import logging
import time
import json
from typing import Dict, Any, List, Optional, Union

# Absolute path of the directory containing this script.
current_dir = os.path.dirname(os.path.abspath(__file__))
# Prepend the script directory to sys.path so the local `uamcf` package
# resolves even when the script is launched from another working directory.
sys.path.insert(0, current_dir)

from uamcf.core import CompressionManager
from uamcf.utils.evaluator import evaluate_model, compare_models
from uamcf.utils.visualizer import plot_compression_results
from uamcf.adapters import get_adapter

# Configure logging: INFO level, written both to compression.log and stderr.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("compression.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("universal_compressor")


def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: Optional list of argument strings. ``None`` (the default) falls
            back to ``sys.argv[1:]``, so existing callers are unaffected;
            passing an explicit list makes the function unit-testable.

    Returns:
        argparse.Namespace holding all options.
    """
    parser = argparse.ArgumentParser(description="通用模型压缩工具")

    # Required argument
    parser.add_argument("--model_path", type=str, required=True,
                        help="模型文件路径（.pth, .h5, .onnx等）")

    # Framework-related options
    parser.add_argument("--framework", type=str, choices=["pytorch", "tensorflow", "onnx"],
                        help="模型框架，不指定则自动检测")

    # Compression method
    parser.add_argument("--method", type=str, default="auto",
                        choices=["auto", "quantization", "pruning", "distillation", "factorization"],
                        help="压缩方法，默认为自动选择")

    # Quantization options
    parser.add_argument("--bits", type=int, default=8,
                        help="量化位宽，默认为8位")
    parser.add_argument("--quant_scheme", type=str, default="asymmetric",
                        choices=["symmetric", "asymmetric"],
                        help="量化方案，默认为非对称量化")
    parser.add_argument("--per_channel", action="store_true",
                        help="是否使用按通道量化")

    # Pruning options
    parser.add_argument("--sparsity", type=float, default=0.5,
                        help="剪枝稀疏度，默认0.5")
    parser.add_argument("--prune_method", type=str, default="magnitude",
                        choices=["magnitude", "l1norm", "random"],
                        help="剪枝方法，默认为幅值剪枝")

    # Evaluation options
    parser.add_argument("--data_path", type=str,
                        help="评估数据集路径，不提供则跳过评估")
    parser.add_argument("--batch_size", type=int, default=32,
                        help="评估批次大小")
    parser.add_argument("--num_classes", type=int,
                        help="分类任务的类别数，不提供则自动检测")

    # Miscellaneous options
    parser.add_argument("--output_dir", type=str, default="./compressed_models",
                        help="输出目录")
    parser.add_argument("--finetune", action="store_true",
                        help="是否在压缩后进行微调")
    parser.add_argument("--finetune_epochs", type=int, default=5,
                        help="微调轮数")
    # BUG FIX: action="store_true" combined with default=True made the
    # --visualize / --auto_adapt flags no-ops (they could never be disabled).
    # Explicit --no_* negation switches make False reachable while keeping
    # the old flags and defaults backward-compatible.
    parser.add_argument("--visualize", dest="visualize", action="store_true", default=True,
                        help="是否生成可视化结果")
    parser.add_argument("--no_visualize", dest="visualize", action="store_false",
                        help="disable visualization output")
    parser.add_argument("--auto_adapt", dest="auto_adapt", action="store_true", default=True,
                        help="是否自动调整压缩配置")
    parser.add_argument("--no_auto_adapt", dest="auto_adapt", action="store_false",
                        help="disable automatic compression-config adaptation")
    parser.add_argument("--device", type=str, default="auto",
                        choices=["auto", "cpu", "cuda"],
                        help="计算设备")
    parser.add_argument("--config", type=str,
                        help="配置文件路径，可覆盖命令行参数")

    return parser.parse_args(argv)


def detect_framework(model_path: str) -> str:
    """Infer the model framework from the file extension of *model_path*.

    Returns one of ``"pytorch"``, ``"tensorflow"`` or ``"onnx"``; unknown
    extensions fall back to ``"pytorch"`` with a warning.
    """
    ext = os.path.splitext(model_path)[1].lower()
    framework_by_extension = {
        '.pt': "pytorch",
        '.pth': "pytorch",
        '.pkl': "pytorch",
        '.h5': "tensorflow",
        '.keras': "tensorflow",
        '.tf': "tensorflow",
        '.onnx': "onnx",
    }
    framework = framework_by_extension.get(ext)
    if framework is not None:
        return framework
    # Unrecognized extension: default to PyTorch, as before.
    logger.warning(f"无法识别模型格式: {ext}，默认使用PyTorch框架")
    return "pytorch"


def load_data(args):
    """Load the evaluation dataset described by ``args``.

    Returns a framework-specific data loader (currently only implemented for
    PyTorch), or ``None`` when no dataset path was given, the framework's
    loader is not implemented yet, or loading fails.
    """
    if not args.data_path:
        logger.info("未提供数据集路径，跳过评估阶段")
        return None

    try:
        framework = args.framework if args.framework else detect_framework(args.model_path)

        if framework == "pytorch":
            import torch
            from torch.utils.data import DataLoader
            import torchvision.transforms as transforms

            # Heuristically detect the dataset type from the directory name.
            data_dir = args.data_path
            if os.path.basename(data_dir).lower() in ['cifar10', 'cifar-10'] or 'cifar10' in data_dir.lower():
                from torchvision.datasets import CIFAR10

                transform = transforms.Compose([
                    transforms.Resize(224),  # fit the input size expected by most models
                    transforms.ToTensor(),
                    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                ])

                dataset = CIFAR10(root=data_dir, train=False, download=True, transform=transform)

            elif os.path.basename(data_dir).lower() in ['imagenet', 'ilsvrc']:
                from torchvision.datasets import ImageNet

                transform = transforms.Compose([
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    # BUG FIX: std was [0.229, 0.224, 0.201] — a typo for the
                    # canonical ImageNet std [0.229, 0.224, 0.225] (matches
                    # the ImageFolder branch below).
                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                ])

                try:
                    dataset = ImageNet(root=data_dir, split='val', transform=transform)
                except Exception as e:
                    logger.warning(f"无法加载ImageNet数据集: {str(e)}")
                    logger.info("尝试加载为通用图像文件夹...")
                    from torchvision.datasets import ImageFolder
                    dataset = ImageFolder(root=data_dir, transform=transform)
            else:
                # Fall back to a generic image-folder layout.
                from torchvision.datasets import ImageFolder

                transform = transforms.Compose([
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ])

                dataset = ImageFolder(root=data_dir, transform=transform)

            data_loader = DataLoader(
                dataset,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=4
            )

            logger.info(
                f"成功加载数据集，共 {len(dataset)} 个样本, {len(dataset.classes) if hasattr(dataset, 'classes') else '未知'} 个类别")
            return data_loader

        elif framework == "tensorflow":
            # TensorFlow data loading not implemented yet.
            logger.info("TensorFlow数据加载逻辑待实现")
            return None

        elif framework == "onnx":
            # ONNX data loading not implemented yet.
            logger.info("ONNX模型数据加载逻辑待实现")
            return None

        # Unknown framework: treat as "no evaluation data".
        return None

    except Exception as e:
        logger.error(f"加载数据集失败: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return None


def auto_generate_compression_config(model, adapter, args):
    """Build the compression configuration for *model*.

    When ``args.auto_adapt`` is off, the config is assembled directly from
    the user-supplied CLI options; otherwise the model is analyzed with
    ``ModelAnalyzer`` and up to two of the highest-priority suggested
    methods are selected. Always returns a dict with ``constraints`` and a
    non-empty ``methods`` mapping (quantization is the fallback).
    """
    if not args.auto_adapt:
        # Adaptive mode disabled: build the config from user options only.
        methods = {}
        if args.method in ("quantization", "auto"):
            methods["quantization"] = {
                "bits": args.bits,
                "scheme": args.quant_scheme,
                "per_channel": args.per_channel
            }
        if args.method in ("pruning", "auto"):
            methods["pruning"] = {
                "sparsity": args.sparsity,
                "method": args.prune_method
            }
        # BUG FIX: previously --method factorization (and distillation)
        # produced an empty methods dict, i.e. a config that compressed
        # nothing. Handle factorization like the adaptive branch does and
        # fall back to quantization when no method applies.
        if args.method == "factorization":
            methods["factorization"] = {
                "rank_ratio": 0.5
            }
        if not methods:
            methods["quantization"] = {
                "bits": args.bits,
                "scheme": args.quant_scheme,
                "per_channel": args.per_channel
            }

        return {
            "constraints": {
                "accuracy_threshold": 0.9,  # keep at least 90% accuracy by default
                "target_size": 0.5,  # compress to 50% size by default
            },
            "methods": methods
        }

    # Adaptive mode: analyze the model and derive the best configuration.
    logger.info("分析模型结构并生成最佳压缩配置...")

    try:
        from uamcf.core.analyzer import ModelAnalyzer

        # Create the model analyzer.
        analyzer = ModelAnalyzer(adapter)

        # Analyze the model.
        analysis_result = analyzer.analyze_model(model)

        # Generate compression suggestions from detected bottlenecks.
        compression_suggestions = analyzer.generate_optimization_suggestions(analysis_result["bottlenecks"])

        methods = {}

        # Accumulate a priority score per suggested method.
        top_methods = {}
        for suggestion in compression_suggestions:
            for method in suggestion["methods"]:
                if method not in top_methods:
                    top_methods[method] = 0
                top_methods[method] += suggestion["priority"]

        # Select the highest-priority methods (at most two).
        selected_methods = sorted(top_methods.items(), key=lambda x: x[1], reverse=True)[:2]

        for method, _ in selected_methods:
            if method == "quantization":
                methods["quantization"] = {
                    "bits": args.bits,
                    "scheme": args.quant_scheme,
                    "per_channel": args.per_channel
                }
            elif method == "pruning":
                methods["pruning"] = {
                    "sparsity": args.sparsity,
                    "method": args.prune_method
                }
            elif method == "factorization":
                methods["factorization"] = {
                    "rank_ratio": 0.5
                }

        # If nothing was recommended, default to quantization.
        if not methods:
            methods["quantization"] = {
                "bits": args.bits,
                "scheme": args.quant_scheme,
                "per_channel": args.per_channel
            }

        config = {
            "constraints": {
                "accuracy_threshold": 0.9,  # keep at least 90% accuracy by default
                "target_size": 0.5,  # compress to 50% size by default
            },
            "methods": methods
        }

        logger.info(f"自动生成压缩配置: {json.dumps(config, indent=2)}")
        return config

    except Exception as e:
        logger.error(f"自动生成压缩配置失败: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())

        # On failure, return a safe default configuration.
        return {
            "constraints": {
                "accuracy_threshold": 0.9,
                "target_size": 0.5,
            },
            "methods": {
                "quantization": {
                    "bits": args.bits,
                    "scheme": args.quant_scheme,
                    "per_channel": args.per_channel
                }
            }
        }


def main():
    """Run the end-to-end compression pipeline.

    Steps: parse args -> optional config-file merge -> framework/device
    detection -> load model -> optional baseline evaluation -> build the
    compression config -> compress -> optional evaluation, comparison and
    visualization -> export the compressed model and config -> print a
    summary report.
    """
    start_time = time.time()

    # Parse command-line arguments.
    args = parse_args()

    # If a config file was provided, merge it in; CLI values take priority
    # (only attributes the command line left as None are overwritten).
    if args.config and os.path.exists(args.config):
        try:
            with open(args.config, 'r') as f:
                config = json.load(f)
                for key, value in config.items():
                    if not hasattr(args, key) or getattr(args, key) is None:
                        setattr(args, key, value)
            logger.info(f"从{args.config}加载配置成功")
        except Exception as e:
            logger.error(f"加载配置文件失败: {str(e)}")

    # Detect the model framework when not given explicitly.
    if not args.framework:
        args.framework = detect_framework(args.model_path)
    logger.info(f"使用框架: {args.framework}")

    # Ensure the output directory exists.
    os.makedirs(args.output_dir, exist_ok=True)

    # Resolve the compute device.
    if args.device == "auto":
        if args.framework == "pytorch":
            import torch
            args.device = "cuda" if torch.cuda.is_available() else "cpu"
        else:
            args.device = "cpu"
    logger.info(f"使用设备: {args.device}")

    # Load the model through the framework adapter.
    logger.info(f"正在加载模型: {args.model_path}")
    try:
        adapter = get_adapter(args.framework, device=args.device)

        model = adapter.load_model(args.model_path)
        if model is None:
            logger.error(f"加载模型失败: {args.model_path}")
            return

        logger.info(f"成功加载模型: {args.model_path}")

        # Log basic model statistics.
        model_info = adapter.get_model_info(model)
        logger.info(f"模型信息: {model_info}")
        logger.info(f"模型大小: {adapter.get_model_size(model) / (1024 * 1024):.2f} MB")
        logger.info(f"参数数量: {adapter.get_parameter_count(model):,}")

    except Exception as e:
        logger.error(f"加载模型失败: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return

    # Load the evaluation data (may be None, in which case evaluation is skipped).
    eval_data = load_data(args)

    # Baseline evaluation of the original model.
    if eval_data is not None:
        logger.info("评估原始模型性能...")
        original_metrics = evaluate_model(model, eval_data, adapter_name=args.framework)
        logger.info(f"原始模型性能: {original_metrics}")
    else:
        original_metrics = {}

    # Build the compression configuration.
    compression_config = auto_generate_compression_config(model, adapter, args)

    # Run the compression.
    compression_manager = CompressionManager()
    logger.info("开始执行模型压缩...")
    compressed_model, stats = compression_manager.compress(
        model,
        compression_config["constraints"],
        adapter_name=args.framework,
        eval_data=eval_data
    )

    # Defensive default so the final report check never hits an unbound name.
    comparison = {}

    # Evaluate and compare, when evaluation data is available.
    if eval_data is not None:
        logger.info("评估压缩后模型性能...")
        compressed_metrics = evaluate_model(compressed_model, eval_data, adapter_name=args.framework)
        logger.info(f"压缩后模型性能: {compressed_metrics}")

        logger.info("比较原始和压缩后的模型...")
        comparison = compare_models(model, compressed_model, eval_data, adapter_name=args.framework)

        # Log the comparison highlights.
        if 'improvements' in comparison:
            imp = comparison['improvements']
            logger.info("压缩结果比较:")
            if 'size_reduction' in imp:
                logger.info(f"  大小减少: {imp['size_reduction']:.2f}x")
            if 'param_reduction' in imp:
                logger.info(f"  参数减少: {imp['param_reduction']:.2f}x")
            if 'accuracy_change' in imp:
                logger.info(f"  精度变化: {imp['accuracy_change'] * 100:+.2f}%")
            if 'latency_improvement' in imp:
                logger.info(f"  延迟改进: {imp['latency_improvement']:.2f}x")

        # Generate visualization artifacts.
        if args.visualize:
            vis_dir = os.path.join(args.output_dir, "visualization")
            os.makedirs(vis_dir, exist_ok=True)
            try:
                plot_compression_results(comparison, vis_dir)
                logger.info(f"可视化结果已保存到: {vis_dir}")
            except Exception as e:
                logger.error(f"生成可视化结果失败: {str(e)}")

    # Save the compressed model.
    # BUG FIX: the original used split('.')[0] / split('.')[-1], which breaks
    # for filenames containing extra dots (e.g. "resnet18.v2.pth"); splitext
    # handles those correctly and is equivalent for simple names.
    model_base, model_ext = os.path.splitext(os.path.basename(args.model_path))
    output_path = os.path.join(args.output_dir, f"{model_base}_compressed{model_ext}")
    compression_manager.export(compressed_model, output_path, adapter_name=args.framework)
    logger.info(f"压缩模型已保存到: {output_path}")

    # Save the compression configuration alongside the model.
    config_path = os.path.join(args.output_dir, f"{model_base}_compression_config.json")
    with open(config_path, 'w') as f:
        json.dump(compression_config, f, indent=4)
    logger.info(f"压缩配置已保存到: {config_path}")

    # Final summary.
    elapsed_time = time.time() - start_time
    logger.info(f"压缩过程完成，总耗时: {elapsed_time:.2f} 秒")

    # Detailed report, printed only when evaluation data was available.
    if eval_data is not None and 'improvements' in comparison:
        imp = comparison['improvements']
        print("\n" + "=" * 50)
        print(" " * 15 + "压缩结果报告")
        print("=" * 50)
        print(f"模型文件: {os.path.basename(args.model_path)}")
        print(f"压缩方法: {', '.join(compression_config['methods'].keys())}")
        print(f"原始大小: {original_metrics.get('size_bytes', 0) / (1024 * 1024):.2f} MB")
        print(f"压缩后大小: {compressed_metrics.get('size_bytes', 0) / (1024 * 1024):.2f} MB")
        if 'size_reduction' in imp:
            print(f"压缩比例: {imp['size_reduction']:.2f}x")
        if 'accuracy_change' in imp:
            print(f"精度变化: {imp['accuracy_change'] * 100:+.2f}%")
        if 'latency_improvement' in imp:
            print(f"推理速度提升: {imp['latency_improvement']:.2f}x")
        print(f"压缩模型保存路径: {output_path}")
        if args.visualize:
            print(f"可视化结果路径: {vis_dir}")
        print("=" * 50)


# Script entry point: run the full compression pipeline when executed directly.
if __name__ == "__main__":
    main()