#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
通用模型压缩工具 - 支持一键压缩多种框架的模型文件
"""

import os
import sys
import json
import argparse
import traceback
import importlib
from typing import Dict, Any, List, Optional, Tuple, Union
import logging
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models

from uamcf.methods.quantization import Quantization
from uamcf.adapters.pytorch import PyTorchAdapter
from uamcf.utils.visualizer import plot_compression_results

# Logging setup for the whole script.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("UniversalCompress")

# Add the project root directory to the import path.
# NOTE(review): this runs AFTER the earlier `from uamcf...` imports near the
# top of the file, so it cannot help resolve those — confirm whether they rely
# on an installed `uamcf` package instead, or whether this insert should move
# above them.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

# Import the compression-framework components (some of these duplicate the
# unconditional imports at the top of the file).
try:
    from uamcf.adapters import PyTorchAdapter, TensorFlowAdapter, ONNXAdapter, TFLiteAdapter
    from uamcf.methods import get_method, get_method_registry
    from uamcf.utils import evaluate_model, compare_models, plot_compression_results
    from uamcf import CompressionManager
    from uamcf.core import ModelAnalyzer

    UAMCF_AVAILABLE = True
except ImportError:
    logger.error("无法导入uamcf模块。请确保已安装该框架。")
    # NOTE(review): sys.exit(1) below makes this flag unreachable by any later
    # code — either the flag or the hard exit is redundant; confirm intent.
    UAMCF_AVAILABLE = False
    sys.exit(1)

# Presets for common model architectures: a factory callable, the expected
# input shape (NCHW, batch dimension included), and the default head width.
# NOTE(review): torchvision deprecated `pretrained=` in favour of `weights=`;
# `pretrained=False` still means "random initialization", so behavior is
# unchanged where the flag is accepted.
MODEL_PRESETS = {
    "resnet18": {
        "create_func": lambda: models.resnet18(pretrained=False),
        "input_shape": [1, 3, 224, 224],
        "output_size": 1000
    },
    # resnet34 added: infer_model_architecture() selects "resnet34" for
    # mid-sized ResNet checkpoints, which previously raised ValueError in
    # create_model_by_type because no such preset existed.
    "resnet34": {
        "create_func": lambda: models.resnet34(pretrained=False),
        "input_shape": [1, 3, 224, 224],
        "output_size": 1000
    },
    "resnet50": {
        "create_func": lambda: models.resnet50(pretrained=False),
        "input_shape": [1, 3, 224, 224],
        "output_size": 1000
    },
    "mobilenet_v2": {
        "create_func": lambda: models.mobilenet_v2(pretrained=False),
        "input_shape": [1, 3, 224, 224],
        "output_size": 1000
    },
    "vgg16": {
        "create_func": lambda: models.vgg16(pretrained=False),
        "input_shape": [1, 3, 224, 224],
        "output_size": 1000
    },
    "efficientnet_b0": {
        "create_func": lambda: models.efficientnet_b0(pretrained=False),
        "input_shape": [1, 3, 224, 224],
        "output_size": 1000
    }
}


# 创建简单的通用模型容器
class GenericModelContainer(nn.Module):
    """Generic fallback container for weights whose architecture is unknown.

    Builds a small stand-in network (a tiny CNN for 4-D image input, an MLP
    for 2-D/3-D input), then best-effort copies any entries of
    ``weights_dict`` whose names match the stand-in's own state dict.
    """

    def __init__(self, weights_dict, input_shape=None, output_size=None):
        """
        Args:
            weights_dict: Raw checkpoint dict (possibly nested under a
                'state_dict'/'params'/'model' key).
            input_shape: Full input shape including batch dim; defaults to
                [1, 3, 224, 224] when None (callers such as
                infer_model_architecture may forward None).
            output_size: Number of output units; defaults to 1000 when None.
        """
        super().__init__()
        # `None` defaults avoid the mutable-default-argument pitfall and
        # tolerate callers that forward unset (None) values, which previously
        # crashed in len()/nn.Linear below.
        self.weights = weights_dict
        self.input_shape = list(input_shape) if input_shape is not None else [1, 3, 224, 224]
        self.output_size = output_size if output_size is not None else 1000
        self.layers = nn.ModuleList()

        # Choose the stand-in topology from the input rank.
        if len(self.input_shape) == 4:  # image-like input
            self._build_image_model()
        elif len(self.input_shape) <= 3:  # sequence or vector input
            self._build_sequence_model()
        else:
            raise ValueError(f"不支持的输入形状: {self.input_shape}")

        # Best-effort weight transfer from the checkpoint.
        self._load_weights()

    def _build_image_model(self):
        """Build a small convolutional stand-in for image input."""
        in_channels = self.input_shape[1]
        self.feature_extractor = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1))
        )
        self.classifier = nn.Linear(256, self.output_size)

    def _build_sequence_model(self):
        """Build a small MLP stand-in for sequence/vector input."""
        if len(self.input_shape) == 2:
            input_size = self.input_shape[1]
        else:
            input_size = self.input_shape[1] * self.input_shape[2]

        self.encoder = nn.Sequential(
            nn.Linear(input_size, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU()
        )
        self.classifier = nn.Linear(256, self.output_size)

    def _load_weights(self):
        """Try to copy matching entries from the weight dict into this model."""
        try:
            model_state = self.state_dict()

            if isinstance(self.weights, dict):
                # Normalize common checkpoint wrappers down to a flat dict.
                if "state_dict" in self.weights:
                    weights_dict = self.weights["state_dict"]
                elif "params" in self.weights:
                    weights_dict = self.weights["params"]
                elif "model" in self.weights and isinstance(self.weights["model"], dict):
                    weights_dict = self.weights["model"]
                else:
                    weights_dict = self.weights

                # Only keys present in our own state dict can be loaded.
                matched_keys = [k for k in weights_dict.keys() if k in model_state]

                if len(matched_keys) > 0:
                    filtered_dict = {k: weights_dict[k] for k in matched_keys}
                    # strict=False: shapes may still mismatch for some keys.
                    self.load_state_dict(filtered_dict, strict=False)
                    logger.info(f"从权重字典加载了 {len(matched_keys)} 个参数")
                else:
                    logger.warning("没有找到匹配的权重参数")
        except Exception as e:
            # Best-effort by design: a failed transfer leaves random init.
            logger.error(f"加载权重失败: {str(e)}")

    def forward(self, x):
        """Run the stand-in network; flattens as needed for the classifier."""
        if hasattr(self, 'feature_extractor'):
            x = self.feature_extractor(x)
            x = torch.flatten(x, 1)
        elif hasattr(self, 'encoder'):
            if len(x.shape) > 2:  # flatten 3-D input to (batch, features)
                x = x.view(x.size(0), -1)
            x = self.encoder(x)

        return self.classifier(x)


def detect_framework(model_path: str) -> str:
    """Infer the model's framework from its file extension.

    Falls back to probing the file contents (currently only a ``torch.load``
    attempt) when the extension is unrecognized.

    Args:
        model_path: Path to the model file.

    Returns:
        One of "pytorch", "tensorflow", "onnx", "tflite".

    Raises:
        ValueError: If the framework cannot be determined.
    """
    # str.endswith accepts a tuple of suffixes — one call per framework.
    if model_path.endswith((".pth", ".pt")):
        return "pytorch"
    if model_path.endswith((".h5", ".keras", ".tf")):
        return "tensorflow"
    if model_path.endswith(".onnx"):
        return "onnx"
    if model_path.endswith(".tflite"):
        return "tflite"

    # Unknown extension: probe the file contents.
    try:
        torch.load(model_path, map_location="cpu")
        return "pytorch"
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); any load failure means "not pytorch".
        pass

    # More content-based detection could be added here for other frameworks.

    raise ValueError(f"无法识别模型框架，请使用 .pth/.h5/.onnx/.tflite 格式，当前：{model_path}")


def create_model_by_type(model_type: str, num_classes: int = None) -> nn.Module:
    """Instantiate a preset architecture, optionally resizing its output head.

    Args:
        model_type: Key into MODEL_PRESETS (e.g. "resnet18").
        num_classes: When given, the final classification layer is replaced
            with a fresh nn.Linear of this width.

    Returns:
        The constructed model.

    Raises:
        ValueError: If ``model_type`` has no preset.
    """
    # Guard clause: unknown types fail fast.
    if model_type not in MODEL_PRESETS:
        raise ValueError(f"不支持的模型类型: {model_type}")

    model = MODEL_PRESETS[model_type]["create_func"]()

    # Swap the classification head when a custom class count is requested.
    if num_classes is not None:
        if hasattr(model, 'fc'):
            # ResNet-style head.
            model.fc = nn.Linear(model.fc.in_features, num_classes)
        elif hasattr(model, 'classifier'):
            head = model.classifier
            if isinstance(head, nn.Linear):
                # Single-Linear classifier head.
                model.classifier = nn.Linear(head.in_features, num_classes)
            elif isinstance(head, nn.Sequential) and isinstance(head[-1], nn.Linear):
                # Sequential head ending in a Linear layer (VGG/MobileNet style).
                head[-1] = nn.Linear(head[-1].in_features, num_classes)

    return model

def infer_model_architecture(weights_dict: Dict, input_shape: List[int] = None, output_size: int = None) -> nn.Module:
    """Heuristically guess a model architecture from state-dict key names.

    Counts the dotted-name components of the checkpoint keys and matches them
    against known families (ResNet / VGG / MobileNet). Falls back to a
    GenericModelContainer when nothing matches — or when a matched family's
    preset cannot be constructed.

    Args:
        weights_dict: Checkpoint dict whose keys are inspected.
        input_shape: Optional input shape, forwarded to the fallback container.
        output_size: Optional class count, forwarded to the chosen model.

    Returns:
        A freshly constructed model (weights are NOT loaded here).
    """
    # Count how often each dotted-name component appears across all keys.
    key_patterns: Dict[str, int] = {}
    for key in weights_dict.keys():
        if isinstance(key, str):
            for part in key.split('.'):
                key_patterns[part] = key_patterns.get(part, 0) + 1

    pattern_str = str(key_patterns).lower()

    try:
        if "resnet" in pattern_str or any("layer" in k and k[-1].isdigit() for k in key_patterns):
            # Likely a ResNet: pick a depth from the per-stage block counts.
            layer_total = sum(key_patterns.get(f"layer{i}", 0) for i in range(1, 5))

            if layer_total <= 20:
                logger.info("权重结构匹配ResNet18")
                return create_model_by_type("resnet18", output_size)
            if layer_total <= 40:
                logger.info("权重结构匹配ResNet34")
                return create_model_by_type("resnet34", output_size)
            logger.info("权重结构匹配ResNet50")
            return create_model_by_type("resnet50", output_size)

        # Parentheses make the original `or`/`and` precedence explicit.
        if "vgg" in pattern_str or ("features" in key_patterns and "classifier" in key_patterns):
            logger.info("权重结构匹配VGG16")
            return create_model_by_type("vgg16", output_size)

        if "mobilenet" in pattern_str or (
                "features" in key_patterns and "conv" in key_patterns and key_patterns.get("dw", 0) > 0):
            logger.info("权重结构匹配MobileNetV2")
            return create_model_by_type("mobilenet_v2", output_size)
    except ValueError as e:
        # A matched family whose preset is missing (historically "resnet34")
        # used to crash here; now it degrades to the generic container.
        logger.warning(f"预设模型创建失败: {str(e)}")

    # Nothing matched: use the generic stand-in container.
    logger.info("无法识别模型架构，使用通用容器")
    return GenericModelContainer(weights_dict, input_shape, output_size)


def _load_checkpoint_weights(model: nn.Module, loaded_data: dict) -> Optional[nn.Module]:
    """Try the known checkpoint layouts in order; return the model on success.

    Order matches the original inline logic: 'state_dict', 'params', 'model'
    (dict or full instance), then the whole dict itself. Returns None when
    every attempt fails.
    """
    # Flat state-dict wrappers share identical handling.
    for key in ("state_dict", "params"):
        if key in loaded_data:
            try:
                model.load_state_dict(loaded_data[key])
                logger.info(f"从'{key}'键加载权重成功")
                return model
            except Exception as e:
                logger.warning(f"从'{key}'键加载权重失败: {str(e)}")

    if "model" in loaded_data and isinstance(loaded_data["model"], (dict, nn.Module)):
        try:
            if isinstance(loaded_data["model"], dict):
                model.load_state_dict(loaded_data["model"])
                logger.info("从'model'键加载权重成功")
                return model
            # The 'model' key holds a full model instance — return it as-is.
            return loaded_data["model"]
        except Exception as e:
            logger.warning(f"从'model'键加载权重失败: {str(e)}")

    # Last resort: treat the whole dict as a state dict.
    try:
        model.load_state_dict(loaded_data)
        logger.info("直接加载权重字典成功")
        return model
    except Exception as e:
        logger.warning(f"直接加载权重字典失败: {str(e)}")

    return None


def load_model(model_path: str, model_type: str = None, input_shape: List[int] = None,
               output_size: int = None) -> nn.Module:
    """Load a model file, handling full instances, wrapped checkpoints and
    architecture inference.

    Args:
        model_path: Path to the checkpoint/model file.
        model_type: Optional MODEL_PRESETS key to build before loading weights.
        input_shape: Optional input shape, used for architecture inference.
        output_size: Optional class count for the constructed model.

    Returns:
        The loaded model, or None when loading fails entirely.
    """
    logger.info(f"加载模型: {model_path}")

    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        loaded_data = torch.load(model_path, map_location=device)

        # Case 1: the file stored a complete model instance.
        if isinstance(loaded_data, nn.Module):
            logger.info("直接加载完整模型实例")
            return loaded_data

        # Case 2: an explicit architecture was requested — build it, then
        # fill in weights from whichever checkpoint layout matches.
        if model_type:
            logger.info(f"使用指定的模型类型: {model_type}")
            model = create_model_by_type(model_type, output_size)

            if isinstance(loaded_data, dict):
                loaded = _load_checkpoint_weights(model, loaded_data)
                if loaded is not None:
                    return loaded

            # Weight transfer failed: hand back the randomly initialized model.
            logger.warning("指定模型类型但权重加载失败，返回未初始化模型")
            return model

        # Case 3: no type given — infer the architecture from the weight keys.
        logger.info("尝试从权重结构推断模型架构")
        return infer_model_architecture(loaded_data, input_shape, output_size)

    except Exception as e:
        logger.error(f"模型加载失败: {str(e)}")
        logger.error(traceback.format_exc())
        return None

def debug_model_file(model_path, device="cuda"):
    """Load a model file and log its top-level structure for debugging.

    Args:
        model_path: Path to the checkpoint file.
        device: torch.load map_location; falls back to CPU when CUDA is
            requested but unavailable.

    Returns:
        Whatever object torch.load produced (module, dict, tensor, ...).
    """
    # Guard against the "cuda" default on CPU-only machines — torch.load with
    # map_location="cuda" would raise there (same rule main() applies).
    if device == "cuda" and not torch.cuda.is_available():
        device = "cpu"

    loaded_data = torch.load(model_path, map_location=device)
    logger.info(f"加载对象的类型: {type(loaded_data)}")

    if isinstance(loaded_data, dict):
        logger.info(f"字典中的键: {loaded_data.keys()}")
        for key, value in loaded_data.items():
            logger.info(f"键 '{key}' 的值类型: {type(value)}")
            if isinstance(value, dict):
                logger.info(f"  '{key}' 包含的子键: {value.keys()}")

    return loaded_data


def create_simple_model(input_shape=(3, 224, 224), num_classes=10):
    """Create a small generic CNN for testing or as a parameter container.

    Args:
        input_shape: (channels, height, width) of the expected input
            (no batch dimension).
        num_classes: Width of the final classification layer.

    Returns:
        An ``nn.Module`` exposing ``features``/``avgpool``/``classifier``
        submodules. The submodule names matter: callers load checkpoints
        into this model with ``strict=False``.
    """

    class SimpleModel(nn.Module):
        def __init__(self, input_shape, num_classes):
            # Zero-argument super() — consistent with the rest of the file.
            super().__init__()
            self.features = nn.Sequential(
                nn.Conv2d(input_shape[0], 64, kernel_size=3, stride=1, padding=1),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=2, stride=2),
                nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=2, stride=2),
            )
            # Fixed 7x7 adaptive pooling keeps the classifier input size
            # independent of the spatial input resolution.
            self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
            self.classifier = nn.Sequential(
                nn.Linear(128 * 7 * 7, 512),
                nn.ReLU(inplace=True),
                nn.Linear(512, num_classes),
            )

        def forward(self, x):
            x = self.features(x)
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.classifier(x)
            return x

    return SimpleModel(input_shape, num_classes)


def load_config(config_path):
    """Load a JSON configuration file.

    Args:
        config_path: Path to the JSON file; may be empty/None.

    Returns:
        The parsed dict, or {} when the path is empty, missing, or the file
        cannot be read/parsed.
    """
    if not config_path or not os.path.exists(config_path):
        return {}

    try:
        # Explicit encoding avoids platform-dependent defaults.
        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
        logger.info(f"成功加载配置文件: {config_path}")
        return config
    except (OSError, ValueError) as e:
        # ValueError covers json.JSONDecodeError and UnicodeDecodeError;
        # narrowed from a blanket `except Exception`.
        logger.error(f"配置文件加载失败: {str(e)}")
        return {}


def parse_shape(shape_str):
    """Parse a comma-separated shape string, e.g. '1,3,224,224' -> [1, 3, 224, 224].

    Args:
        shape_str: The string to parse; may be empty/None.

    Returns:
        The list of ints, or None for empty input or on parse failure.
    """
    if not shape_str:
        return None
    try:
        return [int(dim) for dim in shape_str.split(',')]
    except ValueError:
        # Narrowed from a bare `except:`; int() failure is the only
        # expected error here.
        logger.error(f"形状解析失败: {shape_str}")
        return None


def get_eval_data(framework: str, input_shape=None, batch_size: int = 32):
    """Build evaluation data for the given framework.

    For PyTorch: tries to load (and possibly download) CIFAR-10; on any
    failure falls back to a single batch of random tensors.

    Args:
        framework: Framework name; only "pytorch" is implemented.
        input_shape: Optional full input shape (NCHW) used to size images.
        batch_size: Batch size for the loader / dummy batch.

    Returns:
        A DataLoader, a list with one (data, labels) batch, or None for
        unsupported frameworks.
    """
    # Other frameworks are not implemented yet.
    if framework != "pytorch":
        return None

    import torchvision.transforms as transforms
    from torchvision import datasets
    from torch.utils.data import DataLoader

    # Determine the image size from the input shape, if available.
    if input_shape and len(input_shape) == 4:
        img_size = max(input_shape[2], input_shape[3])
    else:
        img_size = 224  # default size

    transform = transforms.Compose([
        transforms.Resize(img_size),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        transforms.Normalize([0.5] * 3, [0.5] * 3)
    ])

    try:
        # May download CIFAR-10 on first use.
        dataset = datasets.CIFAR10(root="./data", train=False, download=True, transform=transform)
        return DataLoader(dataset, batch_size=batch_size, shuffle=False)
    except Exception:
        # Dataset unavailable (e.g. no network): fall back to one random
        # batch. Narrowed from a bare `except:`; the two previous duplicate
        # branches are merged via a single shape default.
        sample_shape = tuple(input_shape[1:]) if input_shape else (3, 224, 224)
        dummy_data = torch.randn(batch_size, *sample_shape)
        dummy_labels = torch.randint(0, 10, (batch_size,))
        return [(dummy_data, dummy_labels)]


def _select_by_param_stats(adapter, model, all_methods):
    """Heuristic fallback: pick a method from per-layer parameter statistics.

    Returns a {"method", "config"} dict, or None when no heuristic applies.
    """
    layers_info = adapter.get_layers_info(model)

    # Parameter totals, overall and per layer family.
    total_params = sum(info.get("parameters", 0) for info in layers_info.values())
    conv_params = sum(info.get("parameters", 0) for info in layers_info.values()
                      if info.get("type", "") in ["Conv1d", "Conv2d", "Conv3d"])
    fc_params = sum(info.get("parameters", 0) for info in layers_info.values()
                    if info.get("type", "") in ["Linear", "Dense"])

    conv_ratio = conv_params / total_params if total_params > 0 else 0
    fc_ratio = fc_params / total_params if total_params > 0 else 0

    logger.info(f"模型分析 - 总参数: {total_params}, 卷积参数占比: {conv_ratio:.2f}, 全连接参数占比: {fc_ratio:.2f}")

    if total_params > 100000000 and "low_rank" in all_methods:
        # Very large models: low-rank decomposition tends to pay off most.
        logger.info("检测到超大模型，选择低秩分解方法")
        return {"method": "low_rank", "config": {"rank_ratio": 0.3}}

    if conv_ratio > 0.6 and "pruning" in all_methods:
        # Convolution-dominated networks usually prune well.
        logger.info("检测到卷积层占比高，选择剪枝方法")
        return {"method": "pruning", "config": {"sparsity": 0.5, "method": "magnitude"}}

    if fc_ratio > 0.4 and "factorization" in all_methods:
        # FC-heavy models suit factorization.
        logger.info("检测到全连接层占比高，选择因子分解方法")
        return {"method": "factorization", "config": {"rank_ratio": 0.3}}

    return None


def select_best_compression_method(model, framework="pytorch", eval_data=None):
    """Automatically pick the most suitable compression method for a model.

    Decision order: the analyzer's own suggestions, then detected
    compute/memory bottlenecks, then simple parameter statistics, and
    finally a quantization default.

    Args:
        model: The model to compress.
        framework: Source framework ("pytorch", "tensorflow", "onnx", "tflite").
        eval_data: Evaluation data (currently unused by the heuristics).

    Returns:
        dict with "method" (registry name) and "config" (its parameters).

    Raises:
        ValueError: If the framework is unsupported.
    """
    all_methods = get_method_registry()
    logger.info(f"可用的压缩方法: {list(all_methods.keys())}")

    # Quantization works for almost any model; it is the single fallback
    # (previously duplicated as three identical literals).
    quant_fallback = {
        "method": "quantization",
        "config": {"bits": 8, "scheme": "asymmetric"}
    }

    # Dispatch-table replacement for the if/elif adapter chain.
    adapter_classes = {
        "pytorch": PyTorchAdapter,
        "tensorflow": TensorFlowAdapter,
        "onnx": ONNXAdapter,
        "tflite": TFLiteAdapter,
    }
    if framework not in adapter_classes:
        raise ValueError(f"不支持的框架: {framework}")
    adapter = adapter_classes[framework](model)

    try:
        # Run the framework's model analyzer.
        analyzer = ModelAnalyzer(adapter)
        sample_input = adapter.generate_sample_input(model)
        analysis_results = analyzer.analyze_model(model, sample_input)

        bottlenecks = analysis_results.get('bottlenecks', {})
        suggestions = analysis_results.get('suggestions', [])

        # 1) Prefer the analyzer's own highest-priority suggestion.
        if suggestions:
            top_suggestion = max(suggestions, key=lambda x: x.get('priority', 0))
            method_name = top_suggestion.get('methods', ['quantization'])[0]

            method_cls = get_method(method_name)
            default_config = method_cls.get_default_params() if method_cls else {}

            logger.info(f"根据分析器建议选择 {method_name} 方法")
            return {
                "method": method_name,
                "config": default_config
            }

        # 2) No suggestions: decide from detected bottlenecks.
        if bottlenecks.get('compute'):
            bottleneck_type = bottlenecks['compute'][0].get('layer_type', '')

            if bottleneck_type in ['Conv2d', 'Conv1d', 'Conv3d'] and 'pruning' in all_methods:
                logger.info("检测到卷积层计算瓶颈，选择剪枝方法")
                return {
                    "method": "pruning",
                    "config": {"sparsity": 0.5, "method": "magnitude"}
                }
            if bottleneck_type in ['Linear', 'Dense'] and 'low_rank' in all_methods:
                logger.info("检测到全连接层计算瓶颈，选择低秩分解方法")
                return {
                    "method": "low_rank",
                    "config": {"rank_ratio": 0.3}
                }

        if bottlenecks.get('memory') and 'quantization' in all_methods:
            logger.info("检测到内存瓶颈，选择量化方法")
            return quant_fallback

        # 3) Still undecided: fall back to simple parameter statistics.
        choice = _select_by_param_stats(adapter, model, all_methods)
        if choice is not None:
            return choice

        # 4) Default: quantization applies to nearly every model.
        logger.info("使用默认量化方法")
        return quant_fallback

    except Exception as e:
        logger.error(f"自动选择压缩方法失败: {str(e)}")
        logger.error(traceback.format_exc())
        return quant_fallback


def main():
    """CLI entry point: detect the framework, load the model, choose or apply
    compression methods, then export the result and report the comparison."""
    parser = argparse.ArgumentParser(description="通用模型压缩工具")
    parser.add_argument("--model-path", required=True, help="模型文件路径")
    parser.add_argument("--output-dir", default="./output", help="输出目录")
    parser.add_argument("--device", default="cuda", help="运行设备")
    parser.add_argument("--auto", action="store_true", help="自动选择最优压缩策略")
    parser.add_argument("--methods", type=str, default="quantization", help="手动指定压缩方法(逗号分隔)")
    parser.add_argument("--sparsity", type=float, default=0.5, help="剪枝稀疏度")
    parser.add_argument("--bits", type=int, default=8, help="量化位数")
    parser.add_argument("--debug", action="store_true", help="调试模型文件结构")
    parser.add_argument("--model-type", type=str, default="", help="指定模型类型，如resnet18")
    parser.add_argument("--create-model", action="store_true", help="使用简单模型架构加载参数")
    parser.add_argument("--input-shape", type=str, default="", help="输入形状，如'1,3,224,224'")
    parser.add_argument("--output-size", type=int, default=None, help="输出大小(类别数)")
    parser.add_argument("--config", type=str, default="", help="JSON配置文件路径")
    args = parser.parse_args()

    # Detect which framework produced the model file.
    try:
        framework = detect_framework(args.model_path)
        print(f"✔️ 检测到模型框架: {framework}")
    except ValueError as e:
        print(f"❌ {str(e)}")
        return

    # Parse the --input-shape string into a list of ints (or None).
    input_shape = parse_shape(args.input_shape)

    # Load the optional JSON config file.
    config = load_config(args.config)

    # Merge CLI arguments with config values (CLI wins when set).
    # NOTE(review): `or` treats falsy values (0, "") as "unset" — confirm an
    # output size of 0 is never legitimate input here.
    model_type = args.model_type or config.get("model_type")
    output_size = args.output_size or config.get("output_size")
    input_shape = input_shape or config.get("input_shape")

    # Debug mode: dump the checkpoint's top-level structure first.
    if args.debug:
        print("🔍 调试模型文件...")
        loaded_data = debug_model_file(args.model_path, args.device)

        # Inspect the 'params' key (a common state-dict wrapper), if present.
        if isinstance(loaded_data, dict) and 'params' in loaded_data:
            params = loaded_data['params']
            if isinstance(params, dict):
                print("params是字典类型，可能是状态字典。键列表:")
                print(list(params.keys()))
                print("前5个键值对:")
                for k, v in list(params.items())[:5]:
                    print(f"  '{k}': 形状为{v.shape if hasattr(v, 'shape') else '未知'}")

    # Load the model.
    if args.create_model and framework == "pytorch":
        print("🔧 创建简单模型架构并加载参数...")
        # Build the generic SimpleModel container (strip the batch dim when a
        # full 4-D shape was given).
        if input_shape and len(input_shape) >= 3:
            simple_model = create_simple_model(
                input_shape=(input_shape[1], input_shape[2], input_shape[3]) if len(input_shape) == 4 else input_shape,
                num_classes=output_size or 10
            )
        else:
            simple_model = create_simple_model(num_classes=output_size or 10)

        # Load weights from the checkpoint's 'params' key into the container.
        try:
            loaded_data = torch.load(args.model_path, map_location=args.device)
            if isinstance(loaded_data, dict):
                if 'params' in loaded_data and isinstance(loaded_data['params'], dict):
                    simple_model.load_state_dict(loaded_data['params'], strict=False)
                    print("✅ 成功加载模型参数！")
                    model = simple_model
                else:
                    print("❌ 无法找到适合加载的参数。")
                    return
            else:
                print("❌ 加载的数据不是字典类型。")
                return
        except Exception as e:
            print(f"❌ 参数加载失败: {str(e)}")
            return
    else:
        # Smart loading: full instance, known type, or inferred architecture.
        model = load_model(args.model_path, model_type, input_shape, output_size)

        if model is None:
            print("❌ 模型加载失败。请尝试以下方法:")
            print("  1. 使用 --model-type 指定模型类型 (如 resnet18, mobilenet_v2)")
            print("  2. 使用 --create-model 创建简单模型并加载参数")
            print("  3. 使用 --input-shape 指定输入形状 (如 '1,3,224,224')")
            print("  4. 使用 --debug 参数查看模型文件结构")
            return

    # Move the model to the requested device (CUDA only when both requested
    # and actually available).
    device = torch.device(args.device if torch.cuda.is_available() and args.device == "cuda" else "cpu")
    if hasattr(model, 'to'):
        model = model.to(device)

    # Prepare evaluation data (may fall back to random tensors).
    eval_data = get_eval_data(framework, input_shape)

    # Evaluate the original model as the comparison baseline.
    print("🔍 评估原始模型...")
    try:
        original_metrics = evaluate_model(model, eval_data, adapter_name=framework)
        print(f"📊 原始模型性能: {original_metrics}")
    except Exception as e:
        print(f"⚠️ 原始模型评估失败: {str(e)}")
        original_metrics = {}

    # Build the compression configuration.
    if args.auto:
        # Automatically pick the best-suited compression method.
        best_method = select_best_compression_method(model, framework, eval_data)
        compress_config = {
            "constraints": {
                "accuracy_threshold": 0.9,
                "target_size": 0.5
            },
            "methods": {
                best_method["method"]: best_method["config"]
            }
        }
        print(f"🧠 自动选择压缩方法: {best_method['method']}")
    else:
        # Use the user-specified comma-separated method list.
        method_list = args.methods.strip().split(",")
        methods_config = {}

        for method in method_list:
            if method == "quantization":
                methods_config["quantization"] = {
                    "bits": args.bits,
                    "scheme": "asymmetric",
                    "per_channel": False
                }
            elif method == "pruning":
                methods_config["pruning"] = {
                    "sparsity": args.sparsity,
                    "method": "magnitude",
                    "granularity": "element"
                }
            elif method in get_method_registry():
                # Fall back to the method's own default configuration.
                method_cls = get_method(method)
                methods_config[method] = method_cls.get_default_params()
            else:
                print(f"⚠️ 未知的压缩方法: {method}，将被忽略")

        compress_config = {
            "constraints": {
                "accuracy_threshold": 0.9,
                "target_size": 0.5
            },
            "methods": methods_config
        }

    # Run the compression pipeline.
    print("🚀 开始压缩...")

    manager = CompressionManager()

    try:
        compressed_model, stats = manager.compress(
            model,
            compress_config["constraints"],
            adapter_name=framework,
            eval_data=eval_data,
            method_config=compress_config["methods"]
        )

        # Export the compressed model, preserving the input file's suffix.
        # NOTE(review): args.output_dir is not created here — presumably
        # manager.export handles a missing directory; confirm.
        suffix = os.path.splitext(args.model_path)[-1]
        export_path = os.path.join(args.output_dir, f"compressed_model{suffix}")
        manager.export(compressed_model, export_path, adapter_name=framework)
        print(f"✅ 压缩模型保存至: {export_path}")

        # Generate the before/after comparison report.
        print("📈 生成压缩对比报告...")
        try:
            comparison = compare_models(model, compressed_model, eval_data, adapter_name=framework)

            # Print the headline improvement numbers.
            if 'improvements' in comparison:
                imp = comparison['improvements']
                if 'size_reduction' in imp:
                    print(f"  模型大小减少: {imp['size_reduction']:.2f}倍")
                if 'param_reduction' in imp:
                    print(f"  参数数量减少: {imp['param_reduction']:.2f}倍")
                if 'accuracy_change' in imp:
                    print(f"  精度变化: {imp['accuracy_change'] * 100:+.2f}%")
                if 'latency_improvement' in imp:
                    print(f"  延迟改善: {imp['latency_improvement']:.2f}倍")

            # Save the visualization artifacts.
            vis_dir = os.path.join(args.output_dir, "visualization")
            os.makedirs(vis_dir, exist_ok=True)

            plot_compression_results(comparison, vis_dir)
            print(f"🖼️ 可视化结果保存至: {vis_dir}")
        except Exception as e:
            print(f"⚠️ 生成报告失败: {str(e)}")
            print(traceback.format_exc())

        print("🎉 压缩完成！")

    except Exception as e:
        print(f"❌ 压缩失败: {str(e)}")
        print(traceback.format_exc())


# Script entry point.
if __name__ == "__main__":
    main()