import time
from typing import Any, Dict, List

import numpy as np
import ray

from .agent import State, Worker

@ray.remote
class ParameterServer:
    """Distributed parameter server.

    Buffers gradients pushed by workers and applies an averaged update
    once ``batch_size`` gradients have accumulated, bumping a version
    counter so workers can detect stale parameters.
    """

    def __init__(self, initial_params: Dict[str, Any], batch_size: int = 5):
        """
        Args:
            initial_params: Initial parameter dict, one optimizer per key.
            batch_size: Number of buffered gradients to average before
                applying an update (default 5, the previous hard-coded value).
        """
        self.params = initial_params
        self.gradients: List[Dict[str, Any]] = []
        self.version = 0
        self.batch_size = batch_size
        # NOTE(review): AdamOptimizer is not imported in this file — presumably
        # defined elsewhere in the package; confirm before deployment.
        self.optimizers = {
            k: AdamOptimizer(learning_rate=0.001)
            for k in initial_params.keys()
        }

    def get_params(self, version: int = None) -> Dict[str, Any]:
        """Return the current parameters.

        Args:
            version: If given, parameters are only returned when the server
                version is >= the requested version.

        Returns:
            ``{"params": ..., "version": ...}``, or ``None`` when the server
            is behind the requested version.
        """
        if version is not None and self.version < version:
            return None
        return {
            "params": self.params,
            "version": self.version
        }

    def apply_gradients(self, grads: Dict[str, Any]):
        """Buffer one gradient dict; apply an averaged update when full."""
        try:
            self.gradients.append(grads)

            if len(self.gradients) < self.batch_size:
                return

            # Average over the *parameter* keys rather than the first
            # gradient's keys, so both the averaging and the update loop
            # below agree on exactly which entries get updated.
            avg_grads = {
                k: np.mean([g[k] for g in self.gradients], axis=0)
                for k in self.params.keys()
            }

            for k in self.params.keys():
                self.params[k] = self.optimizers[k].apply_gradients(
                    self.params[k],
                    avg_grads[k]
                )

            self.version += 1
            self.gradients = []

        except Exception as e:
            # Best-effort: keep the buffered gradients and retry on the
            # next push. NOTE(review): repeated failures let the buffer
            # grow without bound — consider capping it.
            print(f"参数更新失败: {str(e)}")
            # 保留梯度等待下次更新

@ray.remote
class DistributedWorker(Worker):
    """Distributed worker node.

    Extends the base ``Worker`` with parameter-server coordination:
    pull fresh parameters, run a local training step, then push the
    resulting gradients back to the server.
    """

    def __init__(self, name: str, ps_actor):
        super().__init__(name)
        self.ps = ps_actor
        # Start at -1 so the very first sync accepts any server version.
        self.params_version = -1

    def sync_params(self):
        """Pull parameters from the parameter server.

        Returns:
            True when a newer parameter set was installed, False when the
            server had nothing newer or the sync failed.
        """
        try:
            reply = ray.get(
                self.ps.get_params.remote(self.params_version)
            )
        except Exception as e:
            print(f"参数同步失败: {str(e)}")
            return False

        if reply is None:
            # Server is not ahead of our version yet.
            return False

        try:
            self.q_table = reply["params"]["q_table"]
            self.learning_params = reply["params"]["learning_params"]
            self.params_version = reply["version"]
        except Exception as e:
            print(f"参数同步失败: {str(e)}")
            return False

        return True

    def push_gradients(self, grads: Dict[str, Any]):
        """Fire-and-forget push of local gradients to the parameter server."""
        try:
            self.ps.apply_gradients.remote(grads)
        except Exception as e:
            print(f"梯度推送失败: {str(e)}")
            return False
        return True

    def train_step(self) -> Dict[str, Any]:
        """Run one distributed training step.

        Returns:
            A statistics dict whose "status" key is one of
            "params_not_updated", "success", or "error".
        """
        stats: Dict[str, Any] = {}

        if not self.sync_params():
            stats["status"] = "params_not_updated"
            return stats

        try:
            # Local training, then gradient exchange.
            loss = super().train()
            grads = self.compute_gradients()
            pushed = self.push_gradients(grads)

            stats["status"] = "success"
            stats["loss"] = loss
            stats["gradient_push_success"] = pushed
            stats["params_version"] = self.params_version
            stats["timestamp"] = time.time()

            # Attach the most recent evaluation result when available.
            if hasattr(self, "eval_results"):
                stats["eval_results"] = self.eval_results[-1] if self.eval_results else None

            return stats

        except Exception as e:
            stats["status"] = "error"
            stats["error"] = str(e)
            stats["timestamp"] = time.time()
            return stats

def init_ray():
    """Initialize the Ray cluster; no-op if one is already running."""
    if ray.is_initialized():
        return
    ray.init()

def create_distributed_system(num_workers: int = 4) -> Dict[str, Any]:
    """Build the distributed training system.

    Args:
        num_workers: Number of worker actors to spawn.

    Returns:
        Dict with the actor handles: "parameter_server" and "workers".
    """
    init_ray()

    # Every state starts from the same pursue-biased action values;
    # copy per state so Q-tables stay independent.
    base_actions = {"explore": 0, "pursue": 1.0, "retreat": 0, "communicate": 0.5}
    seed_params = {
        "q_table": {s: dict(base_actions) for s in State},
        "learning_params": {"alpha": 0.1, "gamma": 0.95, "epsilon": 0.05},
    }

    server = ParameterServer.remote(seed_params)
    pool = [DistributedWorker.remote(f"Worker{i}", server) for i in range(num_workers)]

    return {"parameter_server": server, "workers": pool}

def aggregate_evaluations(workers: List[Any], env, n_episodes: int = 10) -> Dict[str, Any]:
    """Aggregate distributed evaluation results.

    Args:
        workers: Worker actor handles (each exposing ``evaluate.remote``).
        env: Evaluation environment forwarded to every worker.
        n_episodes: Episodes evaluated per worker.

    Returns:
        Summary statistics over the per-worker averages, plus the raw
        per-worker results and a timestamp.

    Raises:
        ValueError: If ``workers`` is empty (otherwise np.max/np.min
            would fail with an opaque error).
    """
    if not workers:
        raise ValueError("aggregate_evaluations requires at least one worker")

    # Evaluate on all workers in parallel.
    eval_refs = [w.evaluate.remote(env, n_episodes) for w in workers]
    results = ray.get(eval_refs)

    # Collect per-worker averages for aggregation.
    all_rewards = [res["avg_reward"] for res in results]
    all_steps = [res["avg_steps"] for res in results]

    return {
        "avg_reward": float(np.mean(all_rewards)),
        "std_reward": float(np.std(all_rewards)),
        "max_reward": float(np.max(all_rewards)),
        "min_reward": float(np.min(all_rewards)),
        "avg_steps": float(np.mean(all_steps)),
        "timestamp": time.time(),
        "worker_results": results
    }