# uamcf/core/manager.py
import os
import json
import logging
from typing import Dict, Any, Optional, Union, Tuple

from .analyzer import ModelAnalyzer
from .scheduler import CompressionScheduler
from .pipeline import PipelineController
from ..utils.logger import get_logger


class ConfigManager:
    """Manage the compression configuration: load, merge, update, and save.

    Always falls back to a built-in default configuration when a config
    file is missing or unreadable, so callers see a complete config dict.
    """

    def __init__(self, config_path: Optional[str] = None):
        """Initialize from a config file if given, otherwise with defaults.

        Args:
            config_path: Optional path to a JSON configuration file.
        """
        self.logger = get_logger("ConfigManager")
        self.config: Dict[str, Any] = {}
        if config_path:
            self.load_config(config_path)
        else:
            self._init_default_config()

    def _init_default_config(self) -> None:
        """Reset ``self.config`` to the built-in default configuration."""
        self.config = {
            "version": "1.0.0",
            "compression": {
                "methods": ["quantization", "pruning", "distillation"],
                "default_method": "quantization"
            },
            "constraints": {
                "accuracy_threshold": 0.95,
                "memory_limit": None,
                "latency_target": None,
                "size_limit": None
            },
            "evaluation": {
                "batch_size": 32,
                "metrics": ["accuracy", "latency", "memory"]
            },
            "tuning": {
                "enabled": True,
                "iterations": 5,
                "early_stopping": True
            }
        }

    def load_config(self, config_path: str) -> None:
        """Load configuration from a JSON file, merged over the defaults.

        On any expected failure (missing file, I/O error, bad JSON, or a
        non-object top-level value) the defaults are used instead, so the
        manager is always left in a usable state.
        """
        if not os.path.exists(config_path):
            self.logger.warning("Config file %s not found. Using default configuration.", config_path)
            self._init_default_config()
            return

        try:
            with open(config_path, 'r') as f:
                loaded_config = json.load(f)
        except (OSError, json.JSONDecodeError) as e:
            # Narrow catch: only file-system and JSON-parse failures are
            # expected here; anything else is a programming error and
            # should propagate rather than be silently swallowed.
            self.logger.error("Failed to load config: %s", e)
            self._init_default_config()
            return

        if not isinstance(loaded_config, dict):
            # A JSON array/scalar at the top level cannot be merged.
            self.logger.error("Failed to load config: top-level JSON value is not an object")
            self._init_default_config()
            return

        # Merge the loaded values over the defaults so missing keys fall back.
        self._init_default_config()
        self._merge_configs(self.config, loaded_config)
        self.logger.info("Configuration loaded from %s", config_path)

    def _merge_configs(self, base_config: Dict, new_config: Dict) -> None:
        """Recursively merge ``new_config`` into ``base_config`` in place.

        Nested dicts are merged key-by-key; any other value overwrites.
        """
        for key, value in new_config.items():
            if key in base_config and isinstance(base_config[key], dict) and isinstance(value, dict):
                self._merge_configs(base_config[key], value)
            else:
                base_config[key] = value

    def save_config(self, output_path: str) -> None:
        """Write the current configuration to ``output_path`` as JSON."""
        try:
            with open(output_path, 'w') as f:
                json.dump(self.config, f, indent=2)
            self.logger.info("Configuration saved to %s", output_path)
        except (OSError, TypeError) as e:
            # TypeError covers non-JSON-serializable values in the config.
            self.logger.error("Failed to save config: %s", e)

    def update_config(self, updates: Dict) -> None:
        """Merge ``updates`` into the current configuration in place."""
        self._merge_configs(self.config, updates)
        self.logger.debug("Configuration updated")

    def get_config(self) -> Dict:
        """Return the live configuration dict (not a copy)."""
        return self.config


class CompressionManager:
    """Unified entry point for driving the model-compression workflow.

    Wires together the configuration manager, model analyzer, compression
    scheduler and pipeline controller, and exposes ``compress``/``export``.
    """

    def __init__(self, config_path: Optional[str] = None):
        """Create a manager, optionally loading configuration from a file.

        Args:
            config_path: Optional path to a JSON configuration file.
        """
        self.logger = get_logger("CompressionManager")
        self.config_manager = ConfigManager(config_path)
        self.analyzer = ModelAnalyzer()
        self.scheduler = CompressionScheduler()
        self.pipeline = PipelineController()

        # Push the loaded configuration into the pipeline controller.
        self._configure_components()

    def _configure_components(self) -> None:
        """Propagate the current configuration to the pipeline controller."""
        config = self.config_manager.get_config()
        self.pipeline.configure(config)

    def _prepare_model(self, model: Any) -> Tuple[Any, Any]:
        """Seed RNGs for reproducibility and stage the model for evaluation.

        Moves the model to CUDA when available (CPU otherwise) and switches
        it to evaluation mode.

        Returns:
            The (possibly moved) model and the selected torch device.
        """
        import torch
        import numpy as np

        # Fixed seeds so repeated compression runs are reproducible.
        torch.manual_seed(42)
        np.random.seed(42)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(42)

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if hasattr(model, 'to'):
            model = model.to(device)
            self.logger.info(f"Model moved to device: {device}")
        if hasattr(model, 'eval'):
            model.eval()
            self.logger.info("Model set to evaluation mode for evaluation")
        return model, device

    def _evaluate_original(self, model: Any, eval_data: Any, adapter_name: str) -> Dict:
        """Evaluate the uncompressed model, retrying once if accuracy looks broken.

        A near-zero accuracy usually signals an evaluation glitch rather than
        a genuinely bad model, so the evaluation is re-run once and the better
        result is kept.
        """
        from ..utils.evaluator import evaluate_model

        self.logger.info("Evaluating original model before compression...")
        original_metrics = evaluate_model(model, eval_data, adapter_name)
        self.logger.info(f"Original model metrics: {original_metrics}")

        if 'accuracy' in original_metrics and original_metrics['accuracy'] < 0.05:
            self.logger.warning(f"Original model accuracy suspiciously low: {original_metrics['accuracy']:.6f}")
            self.logger.info("Re-evaluating original model...")

            # Make sure the model is still in evaluation mode before retrying.
            if hasattr(model, 'eval'):
                model.eval()

            retry_metrics = evaluate_model(model, eval_data, adapter_name)
            self.logger.info(f"Re-evaluated original model metrics: {retry_metrics}")

            # Keep whichever evaluation produced the higher accuracy.
            if 'accuracy' in retry_metrics and retry_metrics['accuracy'] > original_metrics['accuracy']:
                self.logger.info(
                    f"Using re-evaluated metrics with higher accuracy: {retry_metrics['accuracy']:.6f} vs {original_metrics['accuracy']:.6f}")
                original_metrics = retry_metrics

        return original_metrics

    def _report_results(self, original_metrics: Dict, compressed_metrics: Dict, stats: Dict) -> None:
        """Log the accuracy change and record the size reduction in ``stats``.

        Writes ``stats['size_reduction']`` as a float ratio when both size
        metrics are available, or the string ``'N/A'`` otherwise.
        """
        if 'accuracy' in original_metrics and 'accuracy' in compressed_metrics:
            accuracy_change = compressed_metrics['accuracy'] - original_metrics['accuracy']
            self.logger.info(f"Accuracy change: {accuracy_change:.6f} ({accuracy_change * 100:+.2f}%)")

            # A large accuracy *gain* from lossy compression is implausible
            # and points at an evaluation problem rather than a real win.
            if accuracy_change > 0.2:
                self.logger.warning(f"Unusually large accuracy improvement detected: {accuracy_change:.6f}")
                self.logger.info("This may indicate evaluation issues. Consider validating on separate test set.")

        if ('size_bytes' in original_metrics and 'size_bytes' in compressed_metrics
                and compressed_metrics['size_bytes'] > 0):
            size_reduction = original_metrics['size_bytes'] / compressed_metrics['size_bytes']
            stats['size_reduction'] = size_reduction
            self.logger.info(f"Size reduction: {size_reduction:.2f}x")
            self.logger.info(f"Compression completed. Size reduction: {size_reduction:.2f}x")
        else:
            stats['size_reduction'] = 'N/A'
            # No "x" suffix here: the old message rendered as the garbled "N/Ax".
            self.logger.info("Compression completed. Size reduction: N/A")

    def compress(self, model: Any, constraints: Optional[Dict[str, Any]] = None,
                 adapter_name: str = "pytorch", eval_data: Any = None) -> Tuple[Any, Dict]:
        """
        Compress a model according to the configured constraints.

        Args:
            model: The model to compress.
            constraints: Optional compression constraints merged into the
                configuration. The caller's dict is never modified.
            adapter_name: Name of the framework adapter to use.
            eval_data: Data used to evaluate the model before/after compression.

        Returns:
            A tuple of (compressed model, compression statistics dict).
        """
        from ..utils.evaluator import evaluate_model

        # Shallow-copy so that adding a "methods" key below cannot mutate
        # the caller's dict (the original implementation did exactly that).
        constraints = dict(constraints) if constraints else None

        model, device = self._prepare_model(model)

        # Merge caller-supplied constraints into the configuration.
        if constraints:
            self.config_manager.update_config({"constraints": constraints})
        config = self.config_manager.get_config()

        original_metrics = self._evaluate_original(model, eval_data, adapter_name)

        # Choose compression methods: honour an explicit "methods" entry,
        # otherwise fall back to conservative 8-bit asymmetric quantization.
        if not (constraints and "methods" in constraints):
            constraints = constraints or {}
            constraints["methods"] = {
                "quantization": {
                    "bits": 8,  # 8-bit keeps accuracy loss small
                    "scheme": "asymmetric",
                    "per_channel": False
                }
            }
            self.config_manager.update_config({"constraints": constraints})
            config = self.config_manager.get_config()
            self.logger.info("Using optimized quantization settings: 8-bit asymmetric")

        # Analyze the model structure.
        self.logger.info("Analyzing model structure...")
        model_info = self.analyzer.analyze_model(model, adapter_name=adapter_name)

        # Generate the compression plan from the analysis and constraints.
        self.logger.info("Generating compression plan...")
        compression_plan = self.scheduler.generate_plan(
            model_info,
            config["constraints"]
        )

        # Execute the compression plan.
        self.logger.info("Executing compression plan...")
        compressed_model, stats = self.pipeline.execute(
            model,
            compression_plan,
            adapter_name=adapter_name,
            eval_data=eval_data
        )

        # Keep the compressed model on the same device and in eval mode.
        if hasattr(compressed_model, 'to'):
            compressed_model = compressed_model.to(device)
        if hasattr(compressed_model, 'eval'):
            compressed_model.eval()

        # Post-compression evaluation.
        self.logger.info("Evaluating compressed model after compression...")
        compressed_metrics = evaluate_model(compressed_model, eval_data, adapter_name)
        self.logger.info(f"Compressed model metrics: {compressed_metrics}")

        self._report_results(original_metrics, compressed_metrics, stats)
        return compressed_model, stats

    def export(self, model: Any, output_path: str, format: str = "native",
               adapter_name: str = "pytorch") -> str:
        """
        Export a compressed model together with its compression configuration.

        Args:
            model: The model to export.
            output_path: Destination path for the exported model.
            format: Export format understood by the pipeline controller.
            adapter_name: Name of the framework adapter to use.

        Returns:
            The path of the exported model file.
        """
        self.logger.info(f"Exporting compressed model to {format} format...")
        export_path = self.pipeline.export_model(
            model,
            output_path,
            format=format,
            adapter_name=adapter_name
        )

        # Save the compression configuration next to the exported model.
        config_path = os.path.join(os.path.dirname(output_path), "compression_config.json")
        self.config_manager.save_config(config_path)

        self.logger.info(f"Model exported to: {export_path}")
        return export_path

    def get_supported_methods(self) -> Dict[str, Dict]:
        """Return the supported compression methods and their parameters."""
        return self.scheduler.get_supported_methods()

    def get_supported_adapters(self) -> Dict[str, str]:
        """Return the supported framework adapters."""
        return self.pipeline.get_supported_adapters()