import numpy as np
from multiprocessing import Process, Pipe
from multiprocessing.connection import Connection


def worker(remote: Connection, parent_remote: Connection, env_fn_wrapper):
    """
    Target function run inside each environment subprocess.

    Builds one environment and services (cmd, data) requests received over
    ``remote`` until a "close" command arrives or the pipe breaks.

    Args:
        remote: This worker's end of the pipe to the parent process.
        parent_remote: The parent's end of the pipe; closed here so only
            the parent process keeps it open.
        env_fn_wrapper: CloudpickleWrapper whose ``x`` attribute is a
            zero-argument callable that constructs the environment.
    """
    parent_remote.close()
    env = env_fn_wrapper.x()

    try:
        while True:
            try:
                cmd, data = remote.recv()
            except EOFError:
                # Parent closed its end (e.g. crashed) — exit cleanly
                # instead of propagating a traceback from recv().
                break
            if cmd == "step":
                obs, reward, done, info, available_actions = env.step(data)
                if np.all(done):  # all agents finished -> start a new episode
                    obs, available_actions = env.reset()
                remote.send((obs, reward, done, info, available_actions))
            elif cmd == "reset":
                obs, available_actions = env.reset()
                remote.send((obs, available_actions))
            elif cmd == "close":
                remote.close()
                break
            elif cmd == "get_spaces":
                remote.send((env.obs_shape, env.act_shape))
            else:
                raise NotImplementedError
    finally:
        # Close the env on every exit path (command, EOF, or exception),
        # not just on an explicit "close" request.
        env.close()


class CloudpickleWrapper:
    """
    Serialization shim that pickles its payload with cloudpickle.

    Plain pickle cannot serialize the lambdas/closures commonly used as
    environment factories; cloudpickle can. cloudpickle's output is a
    valid pickle stream, so the standard pickle module unpickles it.
    """

    def __init__(self, x):
        # Payload is exposed as ``.x`` — read by worker() in the child.
        self.x = x

    def __getstate__(self):
        import cloudpickle
        # Serialize with cloudpickle so closures survive process handoff.
        return cloudpickle.dumps(self.x)

    def __setstate__(self, state):
        import pickle
        # The stream produced by cloudpickle loads with plain pickle.
        self.x = pickle.loads(state)


class ShareDummyVecEnv:
    """
    Vectorized environment that runs each environment in its own subprocess.

    Commands are sent over pipes to ``worker`` processes; per-env results
    are gathered and stacked into batched numpy arrays. All environments
    are assumed to share the same observation/action shapes.
    """

    def __init__(self, env_fns):
        """
        Spawn one worker process per environment factory.

        Args:
            env_fns: List of zero-argument callables, each building one env.
        """
        self.waiting = False
        self.closed = False
        self.n_envs = len(env_fns)

        # One bidirectional pipe per environment.
        pipes = [Pipe() for _ in env_fns]
        self.remotes = tuple(pair[0] for pair in pipes)
        self.work_remotes = tuple(pair[1] for pair in pipes)

        self.ps = []
        for work_remote, remote, env_fn in zip(self.work_remotes,
                                               self.remotes, env_fns):
            proc = Process(target=worker,
                           args=(work_remote, remote,
                                 CloudpickleWrapper(env_fn)))
            proc.daemon = True  # workers die with the parent process
            proc.start()
            self.ps.append(proc)

        # The workers own the work_remotes now; drop the parent's copies.
        for work_remote in self.work_remotes:
            work_remote.close()

        # Ask one worker for the (shared) observation/action shapes.
        self.remotes[0].send(("get_spaces", None))
        self.obs_shape, self.act_shape = self.remotes[0].recv()

    def step(self, actions):
        """
        Synchronously step every environment with its action.

        Args:
            actions: Iterable of per-environment actions.

        Returns:
            tuple: (obs, rewards, dones, infos, available_actions)
        """
        self.step_async(actions)
        return self.step_wait()

    def step_async(self, actions):
        """
        Dispatch one action to each worker without waiting for results.

        Args:
            actions: Iterable of per-environment actions.
        """
        for remote, action in zip(self.remotes, actions):
            remote.send(("step", action))
        self.waiting = True

    def step_wait(self):
        """
        Block until every worker replies to a pending step_async.

        Returns:
            tuple: (obs, rewards, dones, infos, available_actions) with the
            array entries stacked along a leading env dimension.
        """
        replies = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rewards, dones, infos, available_actions = zip(*replies)
        return (np.stack(obs), np.stack(rewards), np.stack(dones), infos,
                np.stack(available_actions))

    def reset(self):
        """
        Reset every environment.

        Returns:
            tuple: (stacked observations, stacked available actions)
        """
        for remote in self.remotes:
            remote.send(("reset", None))
        replies = [remote.recv() for remote in self.remotes]
        obs, available_actions = zip(*replies)
        return np.stack(obs), np.stack(available_actions)

    def close(self):
        """
        Shut down all worker processes (idempotent).
        """
        if self.closed:
            return
        if self.waiting:
            # Drain pending step results so workers aren't blocked on send.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(("close", None))
        for proc in self.ps:
            proc.join()
        self.closed = True


class DummyVecEnv:
    """
    Serial (single-process) vectorized environment.

    Runs every environment in the current process while exposing the same
    interface as ShareDummyVecEnv — including the auto-reset behaviour:
    when all agents of an environment are done, that environment is reset
    and the post-reset observation/available_actions replace the terminal
    ones, exactly as the subprocess worker does.
    """

    def __init__(self, env_fns):
        """
        Build the wrapped environments.

        Args:
            env_fns: List of zero-argument callables, each returning one env.
        """
        self.envs = [env_fn() for env_fn in env_fns]
        env = self.envs[0]

        # Spaces are assumed identical across all environments; read them
        # from the first one.
        self.obs_shape = env.obs_shape
        self.act_shape = env.act_shape
        self.n_envs = len(env_fns)
        self.n_agents = getattr(env, 'n_agents', 1)  # default: single agent

    def step(self, actions):
        """
        Step every environment with its corresponding action.

        Environments whose agents are all done are reset automatically so
        serial and subprocess training see identical post-episode data
        (mirrors the "step" branch of worker()).

        Args:
            actions: Iterable of per-environment actions.

        Returns:
            tuple: (obs, rewards, dones, infos, available_actions) with the
            array entries stacked along a leading env dimension.
        """
        results = []
        for env, action in zip(self.envs, actions):
            obs, reward, done, info, available_actions = env.step(action)
            if np.all(done):  # every agent finished -> start a new episode
                obs, available_actions = env.reset()
            results.append((obs, reward, done, info, available_actions))
        obs, rewards, dones, infos, available_actions = zip(*results)
        return (np.stack(obs), np.stack(rewards), np.stack(dones), infos,
                np.stack(available_actions))

    def reset(self):
        """
        Reset all environments.

        Returns:
            tuple: (stacked observations, stacked available actions)
        """
        results = [env.reset() for env in self.envs]
        obs, available_actions = zip(*results)
        return np.stack(obs), np.stack(available_actions)

    def close(self):
        """
        Close every wrapped environment.
        """
        for env in self.envs:
            env.close()


class SingleEnvWrapper:
    """
    Adapter presenting one environment through the vectorized interface.

    Every return value gains a leading batch dimension of size 1, so the
    wrapper can be used anywhere an n_envs == 1 vector environment is
    expected.
    """

    def __init__(self, env):
        """
        Wrap a single environment instance.

        Args:
            env: The environment to expose.
        """
        self.env = env
        self.obs_shape = env.obs_shape
        self.act_shape = env.act_shape
        self.n_agents = getattr(env, 'n_agents', 1)
        self.n_envs = 1

    def step(self, actions):
        """
        Step the underlying environment.

        Args:
            actions: Either a bare action or a length-1 batch of actions.

        Returns:
            tuple: (obs, rewards, dones, infos, available_actions), each
            carrying a leading batch dimension of 1.
        """
        # Normalize a bare action into a length-1 batch.
        batched = actions if isinstance(actions, (list, np.ndarray)) else [actions]

        obs, rewards, dones, infos, available_actions = self.env.step(batched[0])

        return (np.array([obs]), np.array([rewards]), np.array([dones]),
                [infos], np.array([available_actions]))

    def reset(self):
        """
        Reset the underlying environment.

        Returns:
            tuple: (batched observation, batched available actions)
        """
        obs, available_actions = self.env.reset()
        return np.array([obs]), np.array([available_actions])

    def close(self):
        """
        Close the underlying environment.
        """
        self.env.close()

    @property
    def unwrapped(self):
        """
        Return the raw, unwrapped environment instance.
        """
        return self.env
