import time
from google.protobuf import descriptor
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel
from mlagents_envs.side_channel.environment_parameters_channel import EnvironmentParametersChannel
from cores.environment_setting_channel import EnvironmentSettingChannel, EnvPara, EnvParaType
from cores.string_log_channel import StringLogChannel
from cores.environment_state_channel import EnvironmentStateChannel
from mlagents_envs.environment import ActionTuple
import numpy as np
import torch


class Single_scene():
    """Gym-style wrapper around a Unity "FlyAndDodge" build with a single
    playground and a single agent.

    The constructor launches the Unity process, configures the engine and all
    environment parameters through side channels, then performs an initial
    reset.  Interaction follows the gym convention::

        obs = env.reset()
        next_obs, reward, done, info = env.step(actions)
    """

    def __init__(self, args) -> None:
        # Side channels used to configure and observe the Unity process.
        ECC = EngineConfigurationChannel()
        EPC = EnvironmentParametersChannel()
        ESTC = EnvironmentSettingChannel()
        ESC = EnvironmentStateChannel()
        SLC = StringLogChannel()
        self.env = env = UnityEnvironment(side_channels=[ECC, EPC, ESTC, SLC, ESC], worker_id=13,
                                          no_graphics=True, file_name="/home/cxg6/FlyAndDodge/Builds/Builds/LinuxBuild/LinuxBuild.x86_64")
        # Speed up the simulation and set the (virtual) window size.
        ECC.set_configuration_parameters(
            time_scale=20, width=1200, height=675)
        SLC.send_string("Test StringLogChannel.")

        #############################################################################################
        ###----------------------------Environment Parameter Settings-----------------------------###
        #############################################################################################
        # All positions below are localPosition, i.e. relative to the
        # playground gameObject.  The tuples are sent to Unity in order.
        settings = [
            (EnvPara.seed, EnvParaType.INT, 5050),
            (EnvPara.playground_number, EnvParaType.INT, 1),
            # Target position.
            (EnvPara.target_pos_x, EnvParaType.FLOAT, 0.0),
            (EnvPara.target_pos_y, EnvParaType.FLOAT, 20.0),
            (EnvPara.target_pos_z, EnvParaType.FLOAT, 150.0),
            # Agent's spawn position.
            (EnvPara.spawn_pos_x, EnvParaType.FLOAT, 0.0),
            (EnvPara.spawn_pos_y, EnvParaType.FLOAT, 10.0),
            (EnvPara.spawn_pos_z, EnvParaType.FLOAT, 0.0),
            # Whether to randomize rotation every time the agent respawns.
            (EnvPara.enable_random_airplane_spawn_rotation, EnvParaType.BOOL, False),
            # Agents may only fly inside a fixed cuboid area;
            # leaving the area fails the task.
            (EnvPara.enable_boundary_check, EnvParaType.BOOL, True),
            (EnvPara.boundary_forward, EnvParaType.FLOAT, 170.0),
            (EnvPara.boundary_backward, EnvParaType.FLOAT, -10.0),
            (EnvPara.boundary_up, EnvParaType.FLOAT, 100.0),
            (EnvPara.boundary_down, EnvParaType.FLOAT, -10.0),
            (EnvPara.boundary_left, EnvParaType.FLOAT, -40.0),
            (EnvPara.boundary_right, EnvParaType.FLOAT, 40.0),
            # If the agent drifts too far from the target, the task fails.
            (EnvPara.max_distance_to_target, EnvParaType.FLOAT, 1000.0),
            # Number of agents per playground; only 1 is supported currently.
            (EnvPara.agent_number_per_playground, EnvParaType.INT, 1),
            # z coordinates of the badSpheres; the list length also sets
            # how many badSpheres exist.
            (EnvPara.badspheres_pos_z, EnvParaType.LIST_FLOAT, [70, 75, 80, 85]),
            # The x, y coordinates and scale of each badSphere are sampled
            # uniformly at random.
            (EnvPara.badsphere_pos_x, EnvParaType.RANDOM_UNIFORM, [0, 0, 0]),
            (EnvPara.badsphere_pos_y, EnvParaType.RANDOM_UNIFORM, [10, 10, 10]),
            (EnvPara.badsphere_scale, EnvParaType.RANDOM_UNIFORM, [20, 20, 20]),
            # distance reward (every action) = Tanh(distanceToTarget * 0.001f) * scale
            (EnvPara.distance_reward_scale, EnvParaType.FLOAT, 0.001),
            # angularVelocity punish (every action)
            #   = -Tanh(Abs(angularVelocity.sqrMagnitude * 0.1f)) * scale
            (EnvPara.angular_velocity_punish_scale, EnvParaType.FLOAT, 0.001),
            # time consume punish (every action) = -scale * 1
            (EnvPara.time_consume_punish_scale, EnvParaType.FLOAT, 0.001),
            # If unified_episode=True an agent does NOT respawn immediately
            # after finishing/failing; use env.reset() to begin an episode.
            # Choose False for ml-agents-style training, True for gym-style.
            (EnvPara.unified_episode, EnvParaType.BOOL, True),
            # Whether colliding with a badSphere ends the episode.
            (EnvPara.is_crash_end_episode, EnvParaType.BOOL, True),
        ]
        for para, para_type, value in settings:
            ESTC.set_environment_parameter(para, para_type, value)
        #############################################################################################
        ###----------------------------Environment Parameter Settings-----------------------------###
        #############################################################################################

        self.env.reset()
        self.BEHAVIOR_NAME = "FlyAndDodge?team=0"
        self.max_steps = 1000

    @property
    def action_shape(self):
        # TODO: derive this from the behavior spec instead of hard-coding it.
        return 3

    @property
    def obs_shape(self):
        # TODO: derive this from the behavior spec instead of hard-coding it.
        return 21

    def reset(self):
        """Reset the environment and return the first observation.

        Returns the observation array of the single agent that requests a
        decision right after the reset.
        """
        self.env.reset()
        self.bhspec = self.env.behavior_specs[self.BEHAVIOR_NAME]
        self.step_num = 0
        # Wait until the (single) agent requests its first decision.
        while True:
            decision_steps, terminal_steps = self.env.get_steps(
                self.BEHAVIOR_NAME)
            if len(decision_steps) > 0:
                # Right after a reset every agent must be requesting a decision.
                assert len(decision_steps) == 1
                break
            # get_steps() only returns cached data; advance the simulation,
            # otherwise an empty first batch would make this loop spin forever.
            self.env.step()
        obs = decision_steps.obs[0]
        self.last_decision_steps = decision_steps
        self.last_terminal_steps = terminal_steps
        return obs

    def randam_actions(self):
        """Sample one random continuous action, mimicking gym's
        ``env.action_space.sample()``; useful for testing the environment.

        Returns an array of shape (1, action_shape).
        Raises if called before :meth:`reset` (no behavior spec yet).
        """
        if not hasattr(self, "bhspec"):
            raise Exception("请调用env.reset()")
        sample_actions = self.bhspec.action_spec.random_action(1)
        return sample_actions.continuous

    # Correctly spelled alias, kept alongside the original (misspelled)
    # name for backward compatibility.
    random_actions = randam_actions

    def step(self, actions):
        """Apply ``actions`` for one decision step.

        ``actions`` is an ndarray of shape (1, action_shape) or a list that
        can be reshaped to it.  Returns ``(next_obs, reward, done, info)``;
        ``done`` is also forced True once ``max_steps`` is exceeded.
        """
        self.step_num += 1
        if isinstance(actions, np.ndarray):
            assert actions.shape == (1, self.action_shape), "请传入合适维度的动作"
        elif isinstance(actions, list):
            actions = np.array(actions).reshape(1, self.action_shape)

        action_tuple = ActionTuple()
        action_tuple.add_continuous(actions)

        self.env.set_actions(self.BEHAVIOR_NAME, action_tuple)
        # Advance the simulation by one step.
        self.env.step()
        # Fetch the fresh decision_steps / terminal_steps.
        decision_steps, terminal_steps = self.env.get_steps(
            self.BEHAVIOR_NAME)
        if len(terminal_steps) > 0:
            # Episode ended inside Unity.
            next_obs = terminal_steps.obs[0]
            done = True
            reward = terminal_steps.reward
            info = "reset the environment"
            return next_obs, reward, done, info
        elif len(decision_steps) > 0:
            next_obs = decision_steps.obs[0]
            # Truncate episodes that run past the step budget.
            done = self.step_num > self.max_steps
            reward = decision_steps.reward
            info = ""
            return next_obs, reward, done, info
        else:
            raise Exception("decision_steps和terminal_steps长度都为0，环境出现bug")

    def close(self):
        """Shut down the Unity process."""
        self.env.close()


class Multi_scene():
    """Gym-style wrapper around a Unity "FlyAndDodge" build with 10 parallel
    playgrounds (one agent each).

    NOTE: Unity agent_ids are 1-based; on the Python side the corresponding
    index into obs/reward/done arrays is ``agent_id - 1``.
    """

    def __init__(self, args):
        # Side channels used to configure and observe the Unity process.
        ECC = EngineConfigurationChannel()
        EPC = EnvironmentParametersChannel()
        ESTC = EnvironmentSettingChannel()
        ESC = EnvironmentStateChannel()
        SLC = StringLogChannel()
        self.env = env = UnityEnvironment(side_channels=[ECC, EPC, ESTC, SLC, ESC], worker_id=20,
                                          no_graphics=True, file_name="/home/cxg6/FlyAndDodge/Builds/Builds/LinuxBuild/LinuxBuild.x86_64")
        # Speed up the simulation and set the (virtual) window size.
        ECC.set_configuration_parameters(
            time_scale=20, width=1200, height=675)
        SLC.send_string("Test StringLogChannel.")

        #############################################################################################
        ###----------------------------Environment Parameter Settings-----------------------------###
        #############################################################################################
        # All positions below are localPosition, i.e. relative to the
        # playground gameObject.  The tuples are sent to Unity in order.
        settings = [
            (EnvPara.seed, EnvParaType.INT, 5050),
            (EnvPara.playground_number, EnvParaType.INT, 10),
            # Target position.
            (EnvPara.target_pos_x, EnvParaType.FLOAT, 0.0),
            (EnvPara.target_pos_y, EnvParaType.FLOAT, 20.0),
            (EnvPara.target_pos_z, EnvParaType.FLOAT, 150.0),
            # Agent's spawn position.
            (EnvPara.spawn_pos_x, EnvParaType.FLOAT, 0.0),
            (EnvPara.spawn_pos_y, EnvParaType.FLOAT, 10.0),
            (EnvPara.spawn_pos_z, EnvParaType.FLOAT, 0.0),
            # Whether to randomize rotation every time the agent respawns.
            (EnvPara.enable_random_airplane_spawn_rotation, EnvParaType.BOOL, False),
            # Agents may only fly inside a fixed cuboid area;
            # leaving the area fails the task.
            (EnvPara.enable_boundary_check, EnvParaType.BOOL, True),
            (EnvPara.boundary_forward, EnvParaType.FLOAT, 170.0),
            (EnvPara.boundary_backward, EnvParaType.FLOAT, -10.0),
            (EnvPara.boundary_up, EnvParaType.FLOAT, 100.0),
            (EnvPara.boundary_down, EnvParaType.FLOAT, -10.0),
            (EnvPara.boundary_left, EnvParaType.FLOAT, -40.0),
            (EnvPara.boundary_right, EnvParaType.FLOAT, 40.0),
            # If the agent drifts too far from the target, the task fails.
            (EnvPara.max_distance_to_target, EnvParaType.FLOAT, 1000.0),
            # Number of agents per playground; only 1 is supported currently.
            (EnvPara.agent_number_per_playground, EnvParaType.INT, 1),
            # z coordinates of the badSpheres; the list length also sets
            # how many badSpheres exist.
            (EnvPara.badspheres_pos_z, EnvParaType.LIST_FLOAT, [70, 75, 80, 85]),
            # The x, y coordinates and scale of each badSphere are sampled
            # uniformly at random.
            (EnvPara.badsphere_pos_x, EnvParaType.RANDOM_UNIFORM, [0, 0, 0]),
            (EnvPara.badsphere_pos_y, EnvParaType.RANDOM_UNIFORM, [10, 10, 10]),
            (EnvPara.badsphere_scale, EnvParaType.RANDOM_UNIFORM, [20, 20, 20]),
            # distance reward (every action) = Tanh(distanceToTarget * 0.001f) * scale
            (EnvPara.distance_reward_scale, EnvParaType.FLOAT, 0.001),
            # angularVelocity punish (every action)
            #   = -Tanh(Abs(angularVelocity.sqrMagnitude * 0.1f)) * scale
            (EnvPara.angular_velocity_punish_scale, EnvParaType.FLOAT, 0.001),
            # time consume punish (every action) = -scale * 1
            (EnvPara.time_consume_punish_scale, EnvParaType.FLOAT, 0.001),
            # If unified_episode=True an agent does NOT respawn immediately
            # after finishing/failing; use env.reset() to begin an episode.
            # Choose False for ml-agents-style training, True for gym-style.
            (EnvPara.unified_episode, EnvParaType.BOOL, True),
            # Whether colliding with a badSphere ends the episode.
            (EnvPara.is_crash_end_episode, EnvParaType.BOOL, True),
        ]
        for para, para_type, value in settings:
            ESTC.set_environment_parameter(para, para_type, value)
        #############################################################################################
        ###----------------------------Environment Parameter Settings-----------------------------###
        #############################################################################################

        self.env.reset()
        self.BEHAVIOR_NAME = "FlyAndDodge?team=0"
        self.max_steps = 1000

    @property
    def action_shape(self):
        # TODO: derive this from the behavior spec instead of hard-coding it.
        return 3

    @property
    def obs_shape(self):
        # TODO: derive this from the behavior spec instead of hard-coding it.
        return 21

    def reset(self):
        """Reset all playgrounds and return the initial observations of the
        10 agents."""
        self.env.reset()
        # Per-agent "episode finished" flags (1 = agent finished its episode).
        self.agent_isdead = [0] * 10
        # Number of times step() has been called this episode.
        self.step_num = 0
        # Terminal [obs, reward] per agent; an empty list means still alive.
        self.terminal_obs_reward = [[] for _ in range(10)]
        self.bhspec = self.env.behavior_specs[self.BEHAVIOR_NAME]
        while True:
            decision_steps, terminal_steps = self.env.get_steps(
                self.BEHAVIOR_NAME)
            if len(decision_steps) > 0:
                # Right after a reset all 10 agents must request a decision.
                assert len(decision_steps) == 10
                break
            # get_steps() only returns cached data; advance the simulation,
            # otherwise an empty first batch would make this loop spin forever.
            self.env.step()

        obs = decision_steps.obs[0]
        # Always keep the most recent decision_steps that contained requests.
        self.last_decision_steps = decision_steps
        self.last_terminal_steps = terminal_steps
        return obs

    def randam_actions(self):
        """Sample random continuous actions for all 10 agents, mimicking
        gym's ``env.action_space.sample()``; useful for testing.

        Uses ``bhspec.action_spec.random_action`` and returns its
        ``continuous`` part.  Raises if called before :meth:`reset`.
        TODO: derive action/observation properties from ``bhspec``.
        """
        if not hasattr(self, "bhspec"):
            raise Exception("请调用env.reset()")
        sample_actions = self.bhspec.action_spec.random_action(10)
        return sample_actions.continuous

    # Correctly spelled alias, kept alongside the original (misspelled)
    # name for backward compatibility.
    random_actions = randam_actions

    def step(self, actions):
        """Apply ``actions`` (indexable by ``agent_id - 1``, each row of
        length 3) and step Unity until new decision requests arrive.

        Returns ``(next_obs, reward, done, info)`` where next_obs has shape
        (10, 21), reward is a 1-D array of 10 entries, and done is a list of
        10 flags (all forced to 1 once ``max_steps`` is exceeded).
        """
        self.step_num += 1

        # Route each agent's action row to the agents that actually
        # requested a decision last time.
        input_actions = []
        for agent_id in self.last_decision_steps:
            input_actions.append(actions[agent_id - 1])
        input_actions = np.array(input_actions).reshape(
            len(self.last_decision_steps), 3)
        action_tuple = ActionTuple()
        action_tuple.add_continuous(input_actions)
        self.env.set_actions(self.BEHAVIOR_NAME, action_tuple)
        # Keep stepping the simulation until some agent requests a decision
        # (or every agent has finished its episode).
        while True:
            self.env.step()
            decision_steps, terminal_steps = self.env.get_steps(
                self.BEHAVIOR_NAME)
            if len(terminal_steps) > 0:
                for agent_id in terminal_steps:
                    assert self.agent_isdead[agent_id -
                                             1] != 1, "已经结束回合的智能体不能重新结束回合"
                    self.agent_isdead[agent_id - 1] = 1
                    # Remember the terminal observation/reward for later use.
                    self.terminal_obs_reward[agent_id - 1] = [
                        terminal_steps[agent_id].obs[0], terminal_steps[agent_id].reward]

            if len(decision_steps) > 0:
                assert len(decision_steps) == len(self.agent_isdead) - sum(self.agent_isdead), \
                    "需要决策的智能体数目应该等于存活智能体数目"
                # New decision requests found: this moment's data becomes the
                # next_obs for the previous actions.
                break

            # All agents are done: leave the loop without decision requests.
            if sum(self.agent_isdead) == len(self.agent_isdead):
                break

        # For finished agents, their stored terminal obs/reward are reused as
        # next_obs/reward.  Callers must use the done flags when computing
        # returns, otherwise terminal rewards would be counted repeatedly.
        next_obs = [[] for _ in range(10)]
        reward = [[] for _ in range(10)]

        # Observations/rewards of agents that request a decision.
        for agent_id in decision_steps:
            next_obs[agent_id - 1] = decision_steps[agent_id].obs[0]
            reward[agent_id - 1] = decision_steps[agent_id].reward

        # terminal_obs_reward is a list of [obs, reward] pairs; a non-empty
        # entry at index i means agent i+1 finished, so its terminal state and
        # reward are used as the "next" values.
        for i, obs_reward in enumerate(self.terminal_obs_reward):
            if obs_reward != []:
                next_obs[i] = obs_reward[0]
                reward[i] = obs_reward[1]

        next_obs = np.array(next_obs).reshape(10, 21)
        reward = np.array(reward)
        if self.step_num > self.max_steps:
            # Truncate: mark every agent done once the step budget is spent.
            done = [1 for _ in range(10)]
        else:
            done = self.agent_isdead
        info = ""

        self.last_decision_steps = decision_steps
        self.last_terminal_steps = terminal_steps

        return next_obs, reward, done, info

    def close(self):
        """Shut down the Unity process."""
        self.env.close()

if __name__ == "__main__":
    # Smoke test: drive the multi-scene environment with random actions and
    # reset whenever every agent has finished its episode.
    env = Multi_scene("args")
    obs = env.reset()
    reset_count = 1
    for i in range(1000):
        # actions indexable by agent_id-1, rows of length 3;
        # next_obs is (10, 21); reward is a 1-D array (not a matrix).
        actions = env.randam_actions()
        next_obs, reward, done, info = env.step(actions)
        print(i, reward)

        # All done flags set -> start a new unified episode.
        if len(done) == sum(done):
            print("env reset : {}".format(reset_count))
            reset_count += 1
            print(i, reward)
            env.reset()