import math

import gym
import numpy as np
from gym import spaces

from onpolicy.envs.env_core import EnvCore
from onpolicy.envs.multi_discrete import MultiDiscrete


class ContinuousActionEnv(object):
    """
    Wrapper for a continuous-action multi-agent environment.

    Adapts ``EnvCore`` to the interface expected by on-policy MARL
    trainers: per-agent ``action_space`` / ``observation_space`` lists
    plus a concatenated ``share_observation_space`` for a centralized
    critic.
    """

    def __init__(self, env_size=None):
        # Mutable default ([]) replaced with a None sentinel; EnvCore
        # still receives [] when no size is given, so behavior is unchanged.
        self.env = EnvCore([] if env_size is None else env_size)
        self.num_agent = self.env.agent_num
        self.discrete_action_space = False
        self.signal_obs_dim = self.env.obs_dim
        self.signal_action_dim = self.env.action_dim

        # if true, action is a number 0...N, otherwise action is a one-hot
        # N-dimensional vector
        self.discrete_action_input = False

        self.movable = True

        # configure per-agent spaces
        self.action_space = []
        self.observation_space = []
        self.share_observation_space = []
        share_obs_dim = 0
        for _ in range(self.num_agent):
            total_action_space = []

            # physical action space
            if self.movable:
                if self.discrete_action_space:
                    u_action_space = spaces.Discrete(self.signal_action_dim)
                else:
                    u_action_space = spaces.Box(
                        low=-1,
                        high=+1,
                        shape=(self.signal_action_dim,),
                        dtype=np.float32,
                    )  # [-1, 1]
                total_action_space.append(u_action_space)

            # total action space
            if len(total_action_space) > 1:
                # if all action spaces are discrete, simplify to a
                # MultiDiscrete action space
                if all(
                    isinstance(act_space, spaces.Discrete)
                    for act_space in total_action_space
                ):
                    act_space = MultiDiscrete(
                        [[0, act_space.n - 1] for act_space in total_action_space]
                    )
                else:
                    act_space = spaces.Tuple(total_action_space)
                self.action_space.append(act_space)
            else:
                self.action_space.append(total_action_space[0])

            # observation space
            obs_dim = self.signal_obs_dim
            share_obs_dim += obs_dim
            self.observation_space.append(
                spaces.Box(
                    low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32
                )
            )  # [-inf, inf]

        # shared observation: concatenation of every agent's observation,
        # replicated once per agent
        self.share_observation_space = [
            spaces.Box(
                low=-np.inf, high=+np.inf, shape=(share_obs_dim,), dtype=np.float32
            )
            for _ in range(self.num_agent)
        ]

    def step(self, actions):
        """
        Step all agents at once.

        Assumed input dimensions:
        # actions shape = (5, 2, 5)
        # 5 threads of environment, there are 2 agents inside, and each
        # agent's action is a 5-dimensional one_hot encoding

        Returns stacked observations, communication mask, rewards and done
        flags (each as a numpy array) plus the raw info object from EnvCore.
        """
        obs, communicate_mask, rews, dones, infos = self.env.step(actions)
        return (
            np.stack(obs),
            np.stack(communicate_mask),
            np.stack(rews),
            np.stack(dones),
            infos,
        )

    def EnvTranslate(self, observation):
        """
        Convert the observation data of the environment into the state of
        the environment (delegated to EnvCore).
        """
        self.env.EnvTranslate(observation)

    def reset(self):
        """Reset the wrapped environment and return stacked observations."""
        obs = self.env.reset()
        return np.stack(obs)

    def get_obs(self):
        """Return the current stacked per-agent observations without stepping."""
        obs = self.env.get_obs()
        return np.stack(obs)

    def close(self):
        """No resources to release; kept for the gym-style interface."""
        pass

    def render(self, mode="rgb_array"):
        """Delegate rendering to the wrapped environment."""
        return self.env.render(mode)

    def seed(self, seed):
        """Seeding is not forwarded to EnvCore; intentionally a no-op."""
        pass
