# uamcf/methods/nas.py
import torch
import torch.nn as nn
import numpy as np
import copy
import random
from typing import Dict, Any, Optional, List, Tuple, Union, Callable

from .base import CompressionMethod


class NeuralArchitectureSearch(CompressionMethod):
    """实现神经网络架构搜索压缩方法"""

    def __init__(self, config: Optional[Dict] = None):
        """Initialize the neural architecture search method.

        Args:
            config: Optional configuration dict. Recognized keys:
                - search_space: search-space specification per parameter
                - search_strategy: 'random', 'evolutionary' or 'reinforcement'
                - max_trials: maximum number of search trials
                - evaluation_fn: callable that evaluates a candidate model
                - fitness_fn: callable that scores evaluation results
                - target_size_ratio: desired compressed/original size ratio
                - population_size: population size (evolutionary strategy)
                - mutation_prob: mutation probability (evolutionary strategy)
        """
        super().__init__(config)

        # Pull every tunable off self.config, falling back to its default.
        defaults = {
            "search_space": {},
            "search_strategy": "random",
            "max_trials": 10,
            "evaluation_fn": None,
            "fitness_fn": None,
            "target_size_ratio": 0.5,
            "population_size": 5,
            "mutation_prob": 0.1,
        }
        for key, fallback in defaults.items():
            setattr(self, key, self.config.get(key, fallback))

        self.logger.info(f"Initialized neural architecture search with strategy={self.search_strategy}, "
                         f"max_trials={self.max_trials}")

        # Search bookkeeping.
        self.search_history = []    # one record per evaluated candidate
        self.best_model = None      # best candidate found so far
        self.original_model = None  # uncompressed reference model

    def compress(self, model: nn.Module) -> nn.Module:
        """
        应用神经网络架构搜索压缩

        Args:
            model: 要压缩的模型

        Returns:
            压缩后的模型
        """
        self.logger.info(f"Applying neural architecture search to model")

        try:
            # 保存原始模型
            self.original_model = model

            # 确保有评估函数
            if self.evaluation_fn is None:
                self.logger.error("Evaluation function is required for NAS")
                return model

            # 如果没有提供适应度函数，使用默认适应度函数
            if self.fitness_fn is None:
                self.fitness_fn = self._default_fitness_fn

            # 执行架构搜索
            if self.search_strategy == "random":
                best_model = self._random_search()
            elif self.search_strategy == "evolutionary":
                best_model = self._evolutionary_search()
            elif self.search_strategy == "reinforcement":
                self.logger.warning(
                    "Reinforcement learning search not fully implemented, falling back to random search")
                best_model = self._random_search()
            else:
                self.logger.error(f"Unknown search strategy: {self.search_strategy}")
                return model

            # 如果搜索失败，返回原始模型
            if best_model is None:
                return model

            # 保存最佳模型
            self.best_model = best_model

            # 添加元数据
            best_model._nas_compressed = True
            best_model._search_strategy = self.search_strategy
            best_model._trials = len(self.search_history)

            return best_model

        except Exception as e:
            self.logger.error(f"Neural architecture search failed: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())
            return model

    def _random_search(self) -> Optional[nn.Module]:
        """随机搜索策略"""
        best_model = None
        best_fitness = float('-inf')

        for trial in range(self.max_trials):
            self.logger.info(f"Random search trial {trial + 1}/{self.max_trials}")

            # 生成架构配置
            config = self._generate_random_config()

            # 创建模型
            try:
                candidate = self._create_model_from_config(config)

                # 评估模型
                eval_results = self.evaluation_fn(candidate)

                # 计算适应度
                fitness = self.fitness_fn(eval_results, candidate, self.original_model)

                # 记录结果
                self.search_history.append({
                    "trial": trial,
                    "config": config,
                    "fitness": fitness,
                    "eval_results": eval_results
                })

                self.logger.info(f"Trial {trial + 1}: fitness={fitness:.4f}")

                # 更新最佳模型
                if fitness > best_fitness:
                    best_fitness = fitness
                    best_model = candidate
                    self.logger.info(f"New best model found: fitness={fitness:.4f}")

            except Exception as e:
                self.logger.error(f"Error in trial {trial + 1}: {str(e)}")
                continue

        return best_model

    def _evolutionary_search(self) -> Optional[nn.Module]:
        """进化算法搜索策略"""
        # 初始化种群
        population = []
        for i in range(self.population_size):
            config = self._generate_random_config()
            try:
                model = self._create_model_from_config(config)
                eval_results = self.evaluation_fn(model)
                fitness = self.fitness_fn(eval_results, model, self.original_model)

                population.append({
                    "config": config,
                    "model": model,
                    "fitness": fitness,
                    "eval_results": eval_results
                })

                self.logger.info(f"Initial population {i + 1}: fitness={fitness:.4f}")
            except Exception as e:
                self.logger.error(f"Error in initial population {i + 1}: {str(e)}")
                # 生成另一个配置替代
                i -= 1

        # 如果无法初始化种群，返回None
        if not population:
            return None

        best_model = None
        best_fitness = float('-inf')

        # 开始进化
        for generation in range(self.max_trials // self.population_size):
            self.logger.info(f"Evolution generation {generation + 1}")

            # 对种群按适应度排序
            population.sort(key=lambda x: x["fitness"], reverse=True)

            # 记录当前最佳
            if population[0]["fitness"] > best_fitness:
                best_fitness = population[0]["fitness"]
                best_model = population[0]["model"]
                self.logger.info(f"New best model found: fitness={best_fitness:.4f}")

            # 创建下一代
            next_generation = [population[0]]  # 精英保留

            while len(next_generation) < self.population_size:
                # 选择父母
                parent1 = self._selection(population)
                parent2 = self._selection(population)

                # 交叉
                child_config = self._crossover(parent1["config"], parent2["config"])

                # 变异
                child_config = self._mutate(child_config)

                try:
                    # 创建子代模型
                    child_model = self._create_model_from_config(child_config)

                    # 评估子代
                    eval_results = self.evaluation_fn(child_model)
                    fitness = self.fitness_fn(eval_results, child_model, self.original_model)

                    # 添加到下一代
                    next_generation.append({
                        "config": child_config,
                        "model": child_model,
                        "fitness": fitness,
                        "eval_results": eval_results
                    })

                    # 记录结果
                    self.search_history.append({
                        "generation": generation,
                        "config": child_config,
                        "fitness": fitness,
                        "eval_results": eval_results
                    })

                    self.logger.info(f"Child: fitness={fitness:.4f}")

                except Exception as e:
                    self.logger.error(f"Error creating child: {str(e)}")

            # 更新种群
            population = next_generation

        return best_model

    def _generate_random_config(self) -> Dict:
        """生成随机架构配置"""
        config = {}

        # 处理搜索空间中的每个参数
        for param_name, param_space in self.search_space.items():
            if isinstance(param_space, list):
                # 从列表中随机选择
                config[param_name] = random.choice(param_space)
            elif isinstance(param_space, dict) and "range" in param_space:
                # 从范围中随机选择
                min_val = param_space["range"][0]
                max_val = param_space["range"][1]

                if param_space.get("type", "float") == "int":
                    config[param_name] = random.randint(min_val, max_val)
                else:
                    config[param_name] = random.uniform(min_val, max_val)
            else:
                # 直接使用默认值
                config[param_name] = param_space

        return config

    def _create_model_from_config(self, config: Dict) -> nn.Module:
        """根据配置创建模型"""
        # 这个方法需要根据具体的模型类型实现
        # 下面是一个示例实现，假设有一个create_model函数可以根据配置创建模型
        if "create_model_fn" in self.config:
            create_model_fn = self.config["create_model_fn"]
            model = create_model_fn(config)

            # 如果需要，从原始模型迁移权重
            if self.config.get("transfer_weights", False) and self.original_model is not None:
                self._transfer_weights(self.original_model, model)

            return model
        else:
            raise NotImplementedError("Model creation function not provided in config")

    def _transfer_weights(self, source_model: nn.Module, target_model: nn.Module) -> None:
        """将权重从源模型迁移到目标模型"""
        # 遍历目标模型的所有命名参数
        for target_name, target_param in target_model.named_parameters():
            # 在源模型中查找同名参数
            source_param = None
            for source_name, param in source_model.named_parameters():
                if source_name == target_name:
                    source_param = param
                    break

            # 如果找到匹配的参数且形状相同，则复制权重
            if source_param is not None and source_param.shape == target_param.shape:
                target_param.data.copy_(source_param.data)

    def _selection(self, population: List[Dict]) -> Dict:
        """选择算子 - 根据适应度选择个体"""
        # 锦标赛选择
        candidates = random.sample(population, min(3, len(population)))
        return max(candidates, key=lambda x: x["fitness"])

    def _crossover(self, config1: Dict, config2: Dict) -> Dict:
        """交叉算子 - 合并两个配置"""
        child_config = {}

        # 对每个参数随机选择一个父母的值
        for param_name in config1:
            if random.random() < 0.5:
                child_config[param_name] = config1[param_name]
            else:
                child_config[param_name] = config2[param_name]

        return child_config

    def _mutate(self, config: Dict) -> Dict:
        """变异算子 - 随机修改配置中的参数"""
        mutated_config = copy.deepcopy(config)

        for param_name, param_value in mutated_config.items():
            # 以一定概率变异
            if random.random() < self.mutation_prob:
                param_space = self.search_space.get(param_name, {})

                if isinstance(param_space, list):
                    # 从列表中随机选择新值
                    mutated_config[param_name] = random.choice(param_space)
                elif isinstance(param_space, dict) and "range" in param_space:
                    # 从范围中随机选择新值
                    min_val = param_space["range"][0]
                    max_val = param_space["range"][1]

                    if param_space.get("type", "float") == "int":
                        mutated_config[param_name] = random.randint(min_val, max_val)
                    else:
                        mutated_config[param_name] = random.uniform(min_val, max_val)

        return mutated_config

    def _default_fitness_fn(self, eval_results: Dict, model: nn.Module, original_model: nn.Module) -> float:
        """默认适应度函数，结合精度和大小"""
        # 获取精度
        accuracy = eval_results.get("accuracy", 0.0)

        # 计算大小比例
        original_size = sum(p.numel() for p in original_model.parameters())
        model_size = sum(p.numel() for p in model.parameters())
        size_ratio = model_size / original_size

        # 目标是保持高精度同时减小模型大小
        # 如果模型比目标还小，则不再奖励
        size_penalty = max(0, size_ratio - self.target_size_ratio) / (1 - self.target_size_ratio)

        # 组合成适应度分数 (0.7 * 精度 - 0.3 * 大小惩罚)
        fitness = 0.7 * accuracy - 0.3 * size_penalty

        return fitness

    def evaluate(self, original: Any, compressed: Any) -> Dict:
        """
        评估神经网络架构搜索前后的性能

        Args:
            original: 原始模型
            compressed: 压缩后的模型

        Returns:
            评估指标
        """
        metrics = {}

        try:
            # 计算参数数量
            orig_params = sum(p.numel() for p in original.parameters())
            comp_params = sum(p.numel() for p in compressed.parameters())

            metrics["original_parameters"] = int(orig_params)
            metrics["compressed_parameters"] = int(comp_params)
            metrics["parameters_ratio"] = float(comp_params / orig_params)

            # 计算模型大小
            orig_size = self._get_model_size(original)
            comp_size = self._get_model_size(compressed)

            metrics["original_size_bytes"] = int(orig_size)
            metrics["compressed_size_bytes"] = int(comp_size)
            metrics["size_ratio"] = float(comp_size / orig_size)

            # 添加搜索信息
            if hasattr(compressed, '_nas_compressed'):
                metrics["search_strategy"] = compressed._search_strategy
                metrics["trials"] = compressed._trials

            # 如果有评估结果，添加到指标中
            if self.search_history:
                best_result = max(self.search_history, key=lambda x: x["fitness"])
                metrics["best_fitness"] = float(best_result["fitness"])

                # 添加评估结果
                for key, value in best_result["eval_results"].items():
                    metrics[f"best_{key}"] = value

        except Exception as e:
            self.logger.error(f"Evaluation failed: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())

        return metrics

    def _get_model_size(self, model: nn.Module) -> int:
        """获取模型大小（字节）"""
        size = 0
        for param in model.parameters():
            size += param.numel() * param.element_size()
        for buffer in model.buffers():
            size += buffer.numel() * buffer.element_size()
        return size

    def can_apply(self, layer_type: str) -> bool:
        """NAS应用于整个模型，而非单个层"""
        return False

    @classmethod
    def get_default_params(cls) -> Dict:
        """获取该压缩方法的默认参数"""
        return {
            "search_strategy": "random",
            "max_trials": 10,
            "target_size_ratio": 0.5,
            "population_size": 5,
            "mutation_prob": 0.1
        }

    def get_expected_metrics(self, layer_info: Dict) -> Dict:
        """获取应用该压缩方法的预期影响"""
        # 架构搜索通常能在有限大小下保持较高精度
        return {
            "accuracy_ratio": 0.9,  # 精度保持率
            "size_ratio": self.target_size_ratio,  # 大小减少率（按目标大小）
            "latency_ratio": 0.7,  # 延迟改进率
            "memory_ratio": self.target_size_ratio  # 内存减少率
        }