# uamcf/__main__.py
"""
UAMCF 命令行接口
"""

import os
import sys
import argparse
import json
from typing import Dict, Any, List, Optional

from .core import CompressionManager
from .utils import get_logger, setup_logger, load_config, plot_compression_results
from .adapters import get_adapter

# Module-level logger shared by all CLI command handlers in this file.
logger = get_logger("CLI")


def parse_args() -> argparse.Namespace:
    """Build the UAMCF command-line parser and parse ``sys.argv``.

    Returns:
        argparse.Namespace whose ``command`` attribute is one of
        "compress", "analyze", "compare", "version", or None when no
        subcommand was given.
    """
    parser = argparse.ArgumentParser(
        description="UAMCF - Unified Adaptive Model Compression Framework",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    # Subcommands
    subparsers = parser.add_subparsers(dest="command", help="Command to execute")

    # Framework choices shared by every model-handling subcommand.
    framework_choices = ["pytorch", "tensorflow", "onnx", "tflite"]

    # compress: run the compression pipeline on a model.
    compress_parser = subparsers.add_parser("compress", help="Compress a model")
    compress_parser.add_argument("--model", "-m", required=True, help="Path to the model file")
    compress_parser.add_argument("--output", "-o", required=True, help="Output path for compressed model")
    compress_parser.add_argument("--config", "-c", required=False, help="Path to compression configuration file")
    compress_parser.add_argument("--framework", "-f", default="pytorch",
                                 choices=framework_choices,
                                 help="Deep learning framework")
    compress_parser.add_argument("--eval-data", "-e", help="Path to evaluation data")
    compress_parser.add_argument("--visualize", "-v", action="store_true", help="Visualize compression results")

    # analyze: inspect a model's structure and bottlenecks.
    analyze_parser = subparsers.add_parser("analyze", help="Analyze a model")
    analyze_parser.add_argument("--model", "-m", required=True, help="Path to the model file")
    analyze_parser.add_argument("--output", "-o", help="Output path for analysis results")
    analyze_parser.add_argument("--framework", "-f", default="pytorch",
                                choices=framework_choices,
                                help="Deep learning framework")
    analyze_parser.add_argument("--visualize", "-v", action="store_true", help="Visualize analysis results")

    # compare: diff an original model against its compressed version.
    compare_parser = subparsers.add_parser("compare", help="Compare original and compressed models")
    compare_parser.add_argument("--original", required=True, help="Path to the original model file")
    compare_parser.add_argument("--compressed", required=True, help="Path to the compressed model file")
    compare_parser.add_argument("--output", "-o", help="Output path for comparison results")
    compare_parser.add_argument("--framework", "-f", default="pytorch",
                                choices=framework_choices,
                                help="Deep learning framework")
    compare_parser.add_argument("--eval-data", "-e", help="Path to evaluation data")
    compare_parser.add_argument("--visualize", "-v", action="store_true", help="Visualize comparison results")

    # version: print the package version (no extra arguments, so the
    # returned sub-parser is intentionally not kept).
    subparsers.add_parser("version", help="Show UAMCF version")

    return parser.parse_args()


def compress_model(args):
    """Handle the ``compress`` subcommand.

    Loads the model through the framework adapter, runs the compression
    pipeline, exports the result, writes a ``*_stats.json`` file next to
    the output, and optionally renders visualizations.

    Args:
        args: Parsed CLI namespace (model, output, config, framework,
            eval_data, visualize).

    Returns:
        Process exit code: 0 on success, 1 on any failure.
    """
    logger.info(f"Compressing model: {args.model}")

    # Validate the config file early so we fail fast before loading the
    # model. The manager is constructed from the config *path* below;
    # the parsed dict itself is not needed here.
    if args.config and not load_config(args.config):
        logger.error(f"Failed to load config from {args.config}")
        return 1

    # Create the compression manager.
    manager = CompressionManager(args.config)

    # Resolve the framework adapter.
    adapter = get_adapter(args.framework)
    if adapter is None:
        logger.error(f"Unsupported framework: {args.framework}")
        return 1

    # Load the model.
    model = adapter.load_model(args.model)
    if model is None:
        logger.error(f"Failed to load model from {args.model}")
        return 1

    # Optional evaluation data; compression proceeds without it.
    eval_data = None
    if args.eval_data:
        from .utils import load_dataset
        eval_data = load_dataset(args.eval_data, args.framework)
        if eval_data is None:
            logger.warning(f"Failed to load evaluation data from {args.eval_data}")

    try:
        # Run the compression pipeline.
        compressed_model, stats = manager.compress(
            model,
            adapter_name=args.framework,
            eval_data=eval_data
        )

        # Export the compressed model.
        output_path = manager.export(
            compressed_model,
            args.output,
            adapter_name=args.framework
        )

        if not output_path:
            logger.error("Failed to export compressed model")
            return 1

        logger.info(f"Compressed model saved to: {output_path}")

        # Persist statistics next to the exported model. ``default=str``
        # matches the other commands and tolerates non-JSON-native values.
        stats_path = os.path.splitext(args.output)[0] + "_stats.json"
        with open(stats_path, 'w') as f:
            json.dump(stats, f, indent=2, default=str)

        logger.info(f"Compression statistics saved to: {stats_path}")

        # Optional visualization of the compression results.
        if args.visualize:
            from .utils import plot_compression_results
            vis_dir = os.path.join(os.path.dirname(args.output), "visualization")
            plot_compression_results(stats, vis_dir)

        return 0

    except Exception as e:
        # Broad catch is deliberate: this is a top-level CLI boundary.
        logger.error(f"Compression failed: {str(e)}")
        import traceback
        traceback.print_exc()
        return 1


def analyze_model(args):
    """Handle the ``analyze`` subcommand.

    Loads the model through the framework adapter, runs the analyzer,
    then either writes the full result as JSON (when ``--output`` is
    given) or prints a short summary to stdout.

    Args:
        args: Parsed CLI namespace (model, output, framework, visualize).

    Returns:
        Process exit code: 0 on success, 1 on any failure.
    """
    logger.info(f"Analyzing model: {args.model}")

    # Resolve the framework adapter.
    adapter = get_adapter(args.framework)
    if adapter is None:
        logger.error(f"Unsupported framework: {args.framework}")
        return 1

    # Load the model.
    model = adapter.load_model(args.model)
    if model is None:
        logger.error(f"Failed to load model from {args.model}")
        return 1

    try:
        # Create the analyzer.
        from .core import ModelAnalyzer
        analyzer = ModelAnalyzer()

        # Analyze the model.
        analysis_result = analyzer.analyze_model(model, adapter_name=args.framework)

        # Save analysis results as JSON, or print a summary.
        if args.output:
            output_dir = os.path.dirname(args.output)
            # dirname is "" for a bare filename and os.makedirs("")
            # raises FileNotFoundError, so only create a real directory.
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            with open(args.output, 'w') as f:
                json.dump(analysis_result, f, indent=2, default=str)

            logger.info(f"Analysis results saved to: {args.output}")
        else:
            # Print a brief summary to stdout.
            model_info = analysis_result.get("model_info", {})
            print("\nModel Summary:")
            print(f"  Type: {model_info.get('type', 'Unknown')}")
            print(f"  Parameters: {model_info.get('parameters', 0):,}")
            print(f"  Size: {model_info.get('size_bytes', 0) / (1024 * 1024):.2f} MB")
            print(f"  Layers: {model_info.get('layer_count', 0)}")

            # Print up to five top bottlenecks, if reported.
            bottlenecks = analysis_result.get("bottlenecks", {})
            if "combined" in bottlenecks and bottlenecks["combined"]:
                print("\nMain Bottlenecks:")
                for i, b in enumerate(bottlenecks["combined"][:5]):
                    print(f"  {i + 1}. {b.get('name', 'Unknown')}")

        # Optional visualization of the per-layer statistics.
        if args.visualize:
            from .utils import plot_layer_statistics
            if args.output:
                vis_dir = os.path.join(os.path.dirname(args.output), "visualization")
            else:
                vis_dir = "visualization"
            plot_layer_statistics(analysis_result, vis_dir)

        return 0

    except Exception as e:
        # Broad catch is deliberate: this is a top-level CLI boundary.
        logger.error(f"Analysis failed: {str(e)}")
        import traceback
        traceback.print_exc()
        return 1


def compare_models(args):
    """Handle the ``compare`` subcommand.

    Loads both models through the framework adapter, runs the comparison
    utility, then either writes the full result as JSON (when
    ``--output`` is given) or prints a short summary to stdout.

    Args:
        args: Parsed CLI namespace (original, compressed, output,
            framework, eval_data, visualize).

    Returns:
        Process exit code: 0 on success, 1 on any failure.
    """
    logger.info(f"Comparing models: {args.original} and {args.compressed}")

    # Resolve the framework adapter.
    adapter = get_adapter(args.framework)
    if adapter is None:
        logger.error(f"Unsupported framework: {args.framework}")
        return 1

    # Load the original model.
    original_model = adapter.load_model(args.original)
    if original_model is None:
        logger.error(f"Failed to load original model from {args.original}")
        return 1

    # Load the compressed model.
    compressed_model = adapter.load_model(args.compressed)
    if compressed_model is None:
        logger.error(f"Failed to load compressed model from {args.compressed}")
        return 1

    # Optional evaluation data; comparison proceeds without it.
    eval_data = None
    if args.eval_data:
        from .utils import load_dataset
        eval_data = load_dataset(args.eval_data, args.framework)
        if eval_data is None:
            logger.warning(f"Failed to load evaluation data from {args.eval_data}")

    try:
        # Compare the two models.
        from .utils import compare_models as utils_compare_models
        comparison = utils_compare_models(
            original_model,
            compressed_model,
            eval_data,
            adapter_name=args.framework
        )

        # Save comparison results as JSON, or print a summary.
        if args.output:
            output_dir = os.path.dirname(args.output)
            # Create the target directory when one is specified, matching
            # the behavior of the analyze command.
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            with open(args.output, 'w') as f:
                json.dump(comparison, f, indent=2, default=str)

            logger.info(f"Comparison results saved to: {args.output}")
        else:
            # Print a brief comparison summary. Each metric is optional,
            # so look it up once and report only what is present.
            print("\nModel Comparison Summary:")
            improvements = comparison.get('improvements', {})

            if 'size_reduction' in improvements:
                print(f"  Size Reduction: {improvements['size_reduction']:.2f}x")

            if 'param_reduction' in improvements:
                print(f"  Parameter Reduction: {improvements['param_reduction']:.2f}x")

            if 'accuracy_change' in improvements:
                accuracy_change = improvements['accuracy_change'] * 100
                print(f"  Accuracy Change: {accuracy_change:+.2f}%")

            if 'latency_improvement' in improvements:
                print(f"  Latency Improvement: {improvements['latency_improvement']:.2f}x")

        # Optional visualization of the comparison results.
        if args.visualize:
            from .utils import plot_compression_results
            if args.output:
                vis_dir = os.path.join(os.path.dirname(args.output), "visualization")
            else:
                vis_dir = "visualization"
            plot_compression_results(comparison, vis_dir, prefix="comparison")

        return 0

    except Exception as e:
        # Broad catch is deliberate: this is a top-level CLI boundary.
        logger.error(f"Comparison failed: {str(e)}")
        import traceback
        traceback.print_exc()
        return 1


def show_version():
    """Print the installed UAMCF version and return exit code 0."""
    # Imported lazily so the package version is only resolved on demand.
    from . import __version__

    version_line = f"UAMCF version {__version__}"
    print(version_line)
    return 0


def main():
    """CLI entry point: parse arguments, set up logging, dispatch.

    Returns:
        Process exit code from the selected command handler, or 1 when
        no command was given.
    """
    args = parse_args()

    # Configure logging before any handler runs.
    setup_logger()

    # Map each subcommand to its handler; show_version ignores args.
    dispatch = {
        "compress": compress_model,
        "analyze": analyze_model,
        "compare": compare_models,
        "version": lambda _args: show_version(),
    }

    handler = dispatch.get(args.command)
    if handler is None:
        print("Please specify a command. Use --help for more information.")
        return 1
    return handler(args)


if __name__ == "__main__":
    sys.exit(main())