import logging
import random
import subprocess
import time

import func_timeout
import gym
import numpy as np
from gym import spaces
from gym.envs.registration import EnvSpec

from env.env_client import EnvClient
from env.env_cmd import EnvCmd
from env.env_def import UnitType
from env.env_manager import EnvManager
from onpolicy.envs.mpe.core import Agent, Landmark, World
from onpolicy.envs.mpe.multi_discrete import MultiDiscrete
from record.attach2docker import net_todocker_init

# Camera half-range kept from the original MPE rendering code
# (not referenced by any code visible in this file).
cam_range = 2


def connect_loop(rpyc_port):
    """Poll until an rpyc connection to the simulator in the container works.

    ``rpyc_port`` is the host port mapped to the container's simulation
    server.  Returns a ready :class:`EnvClient` once the red side reports at
    least one airport — checking ``units`` instead would spin forever while
    no aircraft has taken off yet, which is why airports are used here.
    Retries every 3 seconds on connection failure or incomplete state.
    """
    while True:
        try:
            client = EnvClient("127.0.0.1", rpyc_port)
            obs = client.get_observation()
            # Scenario is considered loaded once red airports are present.
            if obs["red"]["airports"]:
                return client
        except Exception as exc:
            print(exc)
            print("rpyc connect failed")
        time.sleep(3)


# environment for all agents in the multiagent world
# currently code assumes that no agents will be created/destroyed at runtime!
class MultiAgentEnv(gym.Env):
    metadata = {"render.modes": ["human", "rgb_array"]}

    def __init__(
        self,
        all_args,
        config_8,
        port,
        done_callback=None,
        post_step_callback=None,
        shared_viewer=True,
        discrete_action=True,
    ):
        """Multi-agent gym wrapper around the containerised simulation.

        Builds MPE-style agent shells and gym spaces for ``num_agents`` red
        agents (plus ``num_enemys`` enemy shells), then boots the dockerised
        simulation via :meth:`init_88`.

        Args:
            all_args: run configuration; this code reads ``episode_length``,
                ``num_agents`` and ``num_enemys`` from it.
            config_8: dict of keyword arguments forwarded to ``init_88``
                (``server_port``, ``agents``, ``config``, ``replay``,
                ``speed``).
            port: environment index, forwarded as ``env_id`` to ``init_88``.
            done_callback: optional hook; stored but not used in this file.
            post_step_callback: optional hook; stored but not used here.
            shared_viewer: one shared render viewer vs. one per agent.
            discrete_action: whether the physical action space is Discrete.
        """
        self.world_length = all_args.episode_length
        self.current_step = 0

        ### config parameters ###
        self.all_args = all_args
        self.num_agents = all_args.num_agents
        self.num_enemys = all_args.num_enemys
        # 4 features (x, y, heading, speed) per unit — matches dict_to_obs.
        self.obs_dim = 4 * (self.num_agents + self.num_enemys)
        self.test_obs = np.zeros(8)
        ###### world parameters ######
        self.dim_c = 0  # communication channel dimension (disabled)
        self.dim_p = 2  # physical (x, y) dimension
        self.collaborative = True
        # NOTE: hard-coded False regardless of the `discrete_action`
        # parameter; the parameter only drives discrete_action_space below.
        self.discrete_action = False
        # add agents (lightweight MPE shells; the real agents live in
        # self.agents_RL, built later by init_88)
        self.agents = [Agent() for i in range(self.num_agents)]
        for i, agent in enumerate(self.agents):
            agent.name = "agent %d" % i
            agent.collide = True
            agent.silent = True
            agent.size = 0.15
        self.ememy_agents = [Agent() for i in range(self.num_enemys)]
        for i, agent in enumerate(self.ememy_agents):
            agent.name = "ememy_agents %d" % i
            agent.collide = True
            agent.silent = True
            agent.size = 0.15
        # set required vectorized gym env property
        self.n = self.num_agents
        # scenario callbacks (stored; not invoked in the visible code)

        self.done_callback = done_callback

        self.post_step_callback = post_step_callback

        # environment parameters
        self.discrete_action_space = discrete_action

        # if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector
        self.discrete_action_input = False
        # if true, even the action is continuous, action will be performed discretely
        self.force_discrete_action = self.discrete_action
        # force_discrete_action is effectively False here because
        # self.discrete_action is hard-coded False above.

        # if true, every agent has the same reward
        self.shared_reward = self.collaborative
        self.time = 0

        # configure spaces
        self.action_space = []
        self.observation_space = []
        self.share_observation_space = []
        share_obs_dim = 0
        for agent in self.agents:
            total_action_space = []

            # physical action space
            if agent.movable:
                if self.discrete_action_space:
                    u_action_space = spaces.Discrete(self.dim_p * 2)
                else:
                    u_action_space = spaces.Box(
                        low=-agent.u_range,
                        high=+agent.u_range,
                        shape=(self.dim_p,),
                        dtype=np.float32,
                    )  # [-1,1]
                total_action_space.append(u_action_space)

            # communication action space (never added here: agents are
            # created silent above)
            if not agent.silent:
                if self.discrete_action_space:
                    c_action_space = spaces.Discrete(self.dim_c)
                else:
                    c_action_space = spaces.Box(
                        low=0.0, high=1.0, shape=(self.dim_c,), dtype=np.float32
                    )  # [0,1]
                total_action_space.append(c_action_space)

            # total action space
            if len(total_action_space) > 1:
                # all action spaces are discrete, so simplify to MultiDiscrete action space
                if all(
                    [
                        isinstance(act_space, spaces.Discrete)
                        for act_space in total_action_space
                    ]
                ):
                    act_space = MultiDiscrete(
                        [[0, act_space.n - 1] for act_space in total_action_space]
                    )
                else:
                    act_space = spaces.Tuple(total_action_space)
                self.action_space.append(act_space)
            else:
                self.action_space.append(total_action_space[0])

            # observation space: unbounded box of the flattened unit features
            obs_dim = self.obs_dim
            share_obs_dim += obs_dim
            self.observation_space.append(
                spaces.Box(
                    low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32
                )
            )  # [-inf,inf]
            agent.action.c = np.zeros(self.dim_c)

        # shared observation: concatenation of every agent's observation
        self.share_observation_space = [
            spaces.Box(
                low=-np.inf, high=+np.inf, shape=(share_obs_dim,), dtype=np.float32
            )
            for _ in range(self.n)
        ]

        # rendering
        self.shared_viewer = shared_viewer
        if self.shared_viewer:
            self.viewers = [None]
        else:
            self.viewers = [None] * self.n

        ###### boot the containerised simulation ######
        self.init_88(port, **config_8)

    ##### 88 init #######
    ##### 88 init #######
    def init_88(self, env_id, server_port, agents, config, replay, speed):
        """Initialise the combat environment: config, container, agents, logging.

        Seeds the RNG with ``env_id`` (deterministic per environment index),
        builds the docker environment manager, instantiates both sides'
        agents, sets up a file logger for exceptions, then starts the
        simulation container, connects via rpyc and performs the first
        ``reset_88``.

        Args:
            env_id: environment index; also the RNG seed and docker suffix.
            server_port: base port; actual port is ``server_port + env_id``.
            agents: agent configuration mapping (name -> conf dict).
            config: scenario config (volume_list, max_game_len, scene_name,
                prefix, image_name).
            replay: dict with ``save_replay`` flag and ``replay_dir`` path.
            speed: simulation speed multiplier sent to the server.
        """
        ########################Env Information########################
        # random.seed(os.getpid() + env_id) would randomise each run;
        # seeding with env_id alone keeps runs reproducible per index.
        random.seed(env_id)
        self.step_episode = 0
        self.env_id = env_id
        self.server_port = server_port + env_id
        self.config = config
        self.volume_list = self.config["volume_list"]
        self.max_game_len = self.config["max_game_len"]
        self.speed = speed
        scene_name = self.config["scene_name"]
        prefix = self.config["prefix"]
        image_name = self.config["image_name"]
        # Manager object for the local containerised simulation environment.
        self.env_manager = EnvManager(
            self.env_id, server_port, scene_name, prefix, image_name=image_name
        )
        self.env_client = None
        ##################### Agent Information ############################
        self.agents_conf = agents
        self.agents_RL = self._init_agents()
        ##################### logger ############################
        # File logger for exceptions / dropped commands.
        # NOTE(review): a new FileHandler is added on every init of the
        # "exceptions" logger — repeated construction in one process would
        # duplicate log lines; confirm this is only built once per process.
        logger_name = "exceptions"
        self.logger = logging.getLogger(logger_name)
        self.logger.setLevel(level=logging.DEBUG)
        log_path = "./logs.txt"
        handler = logging.FileHandler(log_path)
        handler.setLevel(logging.DEBUG)
        self.logger.addHandler(handler)
        # Replay recording flag and destination directory.
        self.save_replay = replay["save_replay"]
        self.replay_dir = replay["replay_dir"]

        # End-of-episode bookkeeping.
        self.distance = 0
        self.time_not_find = 0

        # Last decision instant (pause/resume mechanism). Initialised to 10
        # to avoid issuing ship-deployment commands before the platform has
        # finished initialising.
        self.last_time = 10

        print("————————————仿真环境初始化完成！————————————")
        # Launch the simulation container and establish the rpyc link.
        self._start_env()
        print("————————————仿真环境启动完成！————————————")
        self.env_client = connect_loop(self.env_manager.get_server_port())
        self.env_client.take_action([EnvCmd.make_simulation("SPEED", "", self.speed)])
        print("————————————仿真环境远程连接完成！————————————")
        self.reset_88()

    def _run_env(self, request_period=10):
        """Advance the paused simulation by ``request_period`` sim-seconds.

        Resumes the server clock, polls (with a short host-side sleep) until
        simulation time has moved ``request_period`` seconds past the last
        decision instant, then pauses the server again and records the new
        instant in ``self.last_time``.
        """
        client = self.env_client
        now = client.get_time()
        client.take_action([EnvCmd.make_simulation("RESUME", "", "")])
        while now - self.last_time < request_period:
            time.sleep(0.01)
            now = client.get_time()
        self.last_time = now
        client.take_action([EnvCmd.make_simulation("PAUSE", "", now)])

    def reset_88(self):
        """Step the simulation once and, when replay saving is enabled,
        start the two recording threads for the new episode."""
        self._run_env()
        if self.save_replay:
            net_todocker_init(
                "127.0.0.1",
                self.env_manager.get_data_port(),
                self.agents_RL[0].name,
                self.agents_RL[1].name,
                self.step_episode,
                self.replay_dir,
            )
            self.step_episode += 1

    def _init_agents(self):
        """Instantiate red/blue agents from the configuration mapping.

        Red agents additionally receive ``all_args``; blue agents only get
        their own config.  Also resets the per-episode initial-deployment
        bookkeeping.  Returns the list of constructed agents.
        """
        built = []
        for name, conf in self.agents_conf.items():
            agent_cls = conf["class"]
            if conf["side"] == "red":
                built.append(agent_cls(name, self.all_args, conf))
            else:
                built.append(agent_cls(name, conf))

        self.init_deploy = {"red": [], "blue": []}
        return built

    def _start_env(self):
        """Start the containerised simulation environment.

        If a container with this env's name already exists it is removed
        first, then a fresh container is started and given a few seconds to
        boot before callers attempt to connect.
        """
        docker_name = "env_{}".format(self.env_id)
        # Exact-name match via docker's regex filter. Using a list argv with
        # shell=False avoids spawning a shell and any quoting/injection
        # issues the old `shell=True` string command had.
        result = subprocess.run(
            ["docker", "ps", "-a", "--filter", "name=^/{}$".format(docker_name)],
            stdout=subprocess.PIPE,
        )
        out_str = result.stdout.decode()
        # `docker ps` output is a header plus one row per container; the
        # container name appears as a whitespace-separated token if present.
        if docker_name in out_str.strip().split():
            print("存在同名旧容器,先删除之\n", out_str)
            self.env_manager.stop_docker()
        self.env_manager.start_docker(self.volume_list)  # start new container
        time.sleep(5)  # give the container time to come up before connecting

    def _validate_airport(self, airport_id, speed, action, obs_own):
        """Validate an airport-related command (takeoff / return-to-base).

        Returns True when a problem is found (the command should be
        dropped), False when the command looks legal.

        NOTE(review): ``type4cmd`` is not defined or imported in this file —
        presumably a module-level table imported elsewhere; confirm.
        """
        airports = obs_own["airports"]
        airport = [u for u in airports if u["ID"] == airport_id]
        # Judge command legality from the state of the referenced airport.
        maintype = action["maintype"]
        if len(airport) == 0:
            print("无效机场编号%s" % airport_id)
            return True
        elif maintype == "returntobase":
            # Returning to any existing airport is always allowed.
            return False
        else:
            obj = airport[0]
            if obj["DA"] > 0:
                # Airport under repair: no takeoff allowed.
                print("机场%s修复中无法执行起飞指令" % obj["ID"])
                return True
            if maintype == "takeoffprotect":
                fly_type = 11  # escort takeoff defaults to fighters
            elif maintype in ["takeoffareahunt", "takeofftargethunt"]:
                fly_type = 15  # strike takeoffs default to bombers
            else:
                fly_type = action["fly_type"]
            fly_num = action["fly_num"]
            if int(fly_type) not in type4cmd[maintype]:
                print("机场无法起降类型为%s的单位" % fly_type)
                return True
            type_map = {11: "AIR", 12: "AWCS", 13: "JAM", 14: "UAV", 15: "BOM"}
            # NOTE(review): keys are ints but fly_type may come from the
            # action dict — if it arrives as a string this lookup raises
            # KeyError even though int(fly_type) passed above; confirm the
            # action schema always carries ints here.
            attr = type_map[fly_type]
            if fly_num > obj[attr]:
                print("指令>>>", action)
                print("起飞数量%d大于机场可起飞数量%d" % (fly_num, obj[attr]))
                return True
            # Check that the requested speed is within bounds for the type.
            if speed is not None and self._validate_speed(fly_type, speed):
                return True
        return False

    @staticmethod
    def _validate_target_id(target_id, obs_own):
        """Check that ``target_id`` is an enemy platform visible in ``qb``.

        Returns True when the id is invalid (error found), False otherwise.
        """
        matches = [u for u in obs_own["qb"] if u["ID"] == target_id]
        if matches:
            return False
        print("无效目标平台编号%s" % target_id)
        return True

    @staticmethod
    def _validate_speed(unit_type, speed):
        """Check whether ``speed`` is within the (deliberately loose) band
        allowed for ``unit_type``.  Returns True when out of range.
        """
        limits = {
            11: [0, 1000],   # fighter (~800-1000 km/h)
            12: [600, 800],  # AWACS (~600-800 km/h)
            13: [100, 250],  # jammer
            14: [50, 100],   # UAV
            15: [100, 250],  # bomber
            21: [0, 20],     # ship (~0-30 kn, i.e. 0-54 km/h)
            31: [0, 30],     # ground air-defence (~0-90 km/h)
        }
        low, high = limits[unit_type]
        if low <= speed <= high:
            return False
        print("类型为%d的单位速度%d越界" % (unit_type, speed))
        return True

    def _validate_self_id(self, maintype, self_id, speed, obs_own):
        """Check that the command executor id is valid for ``maintype``.

        - ``airattack`` must be issued to a single platform (target
          assignment is the player's job).
        - Other commands may address a platform or a team (the server
          tolerates both; return-to-base in particular may be either).

        Also checks the requested speed for the executor's unit type.
        Returns True when a problem is found.
        """
        units = [u for u in obs_own["units"] if u["ID"] == self_id]
        teams = [t for t in obs_own["teams"] if t["TMID"] == self_id]
        if maintype == "airattack":
            if not units:
                print("无效平台编号%s" % self_id)
                return True
            return False
        if not units and not teams:
            print("无效执行主体编号%s" % self_id)
            return True
        executor = units[0] if units else teams[0]
        if executor["LX"] not in type4cmd[maintype]:
            print(
                "类型为%s的平台或者编队%s无法执行%s指令"
                % (executor["LX"], self_id, maintype)
            )
            return True
        # Check that the requested speed is legal for this unit type.
        if speed is not None and self._validate_speed(executor["LX"], speed):
            return True
        return False

    @staticmethod
    def _validate_cov_id(cov_id, obs_own):
        """Check that the escort target is one of our own teams of an
        escortable type (AWACS / jammer / bomber).  Returns True on error."""
        teams = [u for u in obs_own["teams"] if u["TMID"] == cov_id]
        if not teams:
            print("无效护航对象编号%s" % cov_id)
            return True
        escortable_types = [12, 13, 15]
        if int(teams[0]["LX"]) in escortable_types:
            return False
        print("非法护航目标类型%s" % teams[0]["LX"])
        return True

    def _action_validate(self, action, obs_own, side, sim_time):
        """Basic validity screening of one command dict.

        Checks executor, target, escort and airport fields when present
        (including speed bounds).  The one-off initial deployment of ships /
        ground units is checked last and only when everything else passed,
        so a bad command is not recorded as a used-up deployment.

        Returns True when any check finds an error.
        """
        errors = []
        maintype = action["maintype"]
        if "self_id" in action:
            spd = int(action["speed"]) if "speed" in action else None
            errors.append(
                self._validate_self_id(maintype, int(action["self_id"]), spd, obs_own)
            )
        if "target_id" in action:
            errors.append(self._validate_target_id(int(action["target_id"]), obs_own))
        if "cov_id" in action:
            errors.append(self._validate_cov_id(int(action["cov_id"]), obs_own))
        if "airport_id" in action:
            spd = int(action["speed"]) if "speed" in action else None
            errors.append(
                self._validate_airport(int(action["airport_id"]), spd, action, obs_own)
            )
        if maintype in ["Ship_Move_Deploy", "Ground_Deploy"] and not any(errors):
            errors.append(self._validate_deploy(side, action, sim_time))
        # True means an error was found and the command should be dropped.
        return any(errors)

    def _validate_deploy(self, side, action, sim_time):
        """Validate a one-off initial deployment command.

        Deployment is only legal within the first 120 sim-seconds, on the
        deployer's own half (at least 120 km from the centre line), and at
        most once per unit.  Returns True when the command is invalid;
        otherwise records the unit as deployed and returns False.
        """
        pos_x = action["point_x"]
        if side == "red":
            too_late = sim_time > 120
            wrong_half = pos_x < 120000
            already_done = action["self_id"] in self.init_deploy["red"]
            if too_late or wrong_half or already_done:
                print("红方护卫舰初始部署指令无效")
                return True  # True means the check found an error
            self.init_deploy["red"].append(action["self_id"])
        elif side == "blue":
            too_late = sim_time > 120
            wrong_half = pos_x > -120000
            already_done = action["self_id"] in self.init_deploy["blue"]
            if too_late or wrong_half or already_done:
                print("蓝方护卫舰初始部署指令无效")
                return True
            self.init_deploy["blue"].append(action["self_id"])
        return False

    # @func_timeout.func_set_timeout(3.0)
    def _agent_step(self, agent, sim_time, num_step, obs):
        """Run one decision step for a single agent.

        Red agents receive the RL action vector and return
        ``(actions, is_red)``; blue agents only see time and observation,
        and their readiness flag is always False.
        """
        if agent.side == "red":
            return agent.step(sim_time, num_step, obs)
        return agent.step(sim_time, obs), False

    def _run_agents(self, observation, num_step, action_n):
        """Run every agent for one decision step, drop invalid commands,
        then send the surviving commands to the server.

        Red agents consume the RL actions (``action_n``); blue agents
        consume the step counter (``num_step``).  Commands that fail
        ``_action_validate`` are logged and removed.

        Returns the red agent's readiness flag; False when the red agent
        timed out (or no red agent is configured).
        """
        sim_time = observation["sim_time"]
        obs_red = observation["red"]
        obs_blue = observation["blue"]
        actions_red = []
        actions_blue = []
        # BUGFIX: is_red was previously unbound when the red agent timed out
        # (or when no red agent exists), raising NameError at the return.
        is_red = False
        for agent in self.agents_RL:
            side = agent.side
            if side == "red":
                try:
                    actions_red, is_red = self._agent_step(
                        agent, sim_time, action_n, observation
                    )
                except func_timeout.exceptions.FunctionTimedOut:
                    print("red agent time out at this step")
                    self.logger.debug(
                        "在{}秒时红方智能体决策超时\n".format(sim_time)
                    )  # log the timeout
                actions_red = self._filter_invalid(
                    actions_red, obs_red, side, sim_time,
                    "在{}秒时删除红方无效指令{}\n",
                )
            else:
                try:
                    actions_blue, _ = self._agent_step(
                        agent, sim_time, num_step, observation
                    )
                except func_timeout.exceptions.FunctionTimedOut:
                    print("blue agent time out at this step")
                    self.logger.debug(
                        "在{}秒时蓝方智能体决策超时\n".format(sim_time)
                    )  # log the timeout
                actions_blue = self._filter_invalid(
                    actions_blue, obs_blue, side, sim_time,
                    "在{}秒时删除蓝方无效指令{}\n",
                )
        self._run_actions(actions_red + actions_blue)
        return is_red

    def _filter_invalid(self, actions, obs_side, side, sim_time, log_template):
        """Return ``actions`` minus those failing validation, logging each
        removal with ``log_template.format(sim_time, action)``."""
        kept = []
        for action in actions:
            if self._action_validate(action, obs_side, side, sim_time):
                self.logger.debug(log_template.format(sim_time, action))
            else:
                kept.append(action)
        return kept

    def _run_actions(self, actions):
        """Send a batch of command dicts from the client to the simulation server."""
        self.env_client.take_action(actions)

    def _get_observation(self):
        """Fetch the current raw situation dict from the simulation server."""
        return self.env_client.get_observation()

    def _reset(self):
        """Reset every agent plus the per-episode bookkeeping."""
        for rl_agent in self.agents_RL:
            rl_agent.reset()
        # Forget which units already used their one-off initial deployment.
        self.init_deploy = {"red": [], "blue": []}
        # Reset the last decision instant; 10 guards against commands being
        # issued before the platform has finished initialising.
        self.last_time = 10

    ##########
    def dict_to_obs(self, observation):
        """Flatten the raw observation dict into a 1-D float32 vector.

        Layout: 4 slots (x, y, heading, speed) per red A2A unit, followed by
        ``num_enemys * 4`` slots for enemy units.  Positions are scaled by
        1e-5 and the heading is converted from degrees to radians.

        NOTE: the enemy half of the vector is left at zero on purpose — the
        original code wrote each enemy value and immediately overwrote it
        with 0 (dead stores).  Those writes had no observable effect, so the
        enemy loop is removed here; the vector layout is unchanged.

        NOTE(review): assumes at most ``num_agents`` red A2A units appear in
        the observation; extra units would overflow the red block — confirm
        against the scenario config.
        """
        obs = np.zeros(
            (self.num_agents * 4 + self.num_enemys * 4),
            dtype=np.float32,
        )
        slot = 0
        for unit in observation["red"]["units"]:
            if unit["LX"] == UnitType.A2A:
                obs[slot] = unit["X"] / 100000  # x position, 1e-5 scale
                obs[slot + 1] = unit["Y"] / 100000  # y position, 1e-5 scale
                obs[slot + 2] = unit["HX"] / 360 * 2 * np.pi  # heading deg->rad
                obs[slot + 3] = unit["SP"]  # speed
                slot += 4
        return obs

    def seed(self, seed=None):
        """Seed numpy's global RNG; defaults to 1 when no seed is given."""
        np.random.seed(1 if seed is None else seed)

    # step  this is  env.step()
    def step(self, action_n):
        """One environment step.

        Pushes the RL actions into the simulation repeatedly until the red
        agent reports that it has consumed them, then collects per-agent
        observations, rewards, dones and infos.  With shared rewards every
        agent receives the summed team reward.
        """
        self.current_step += 1
        observation = self._get_observation()  # current situation
        print("仿真时间" + str(observation["sim_time"]))
        # Keep issuing commands / advancing the sim until the red agent
        # signals readiness.
        is_red = False
        while not is_red:
            is_red = self._run_agents(observation, action_n, action_n)
            observation_dot = self._get_observation()
            observation = observation_dot.copy()
            self._run_env()

        obs_n, reward_n, done_n, info_n = [], [], [], []
        for i in range(self.num_agents):
            obs_n.append(self._get_obs(observation_dot))
            reward_n.append([self._get_reward(observation_dot, action_n, i)])
            done_n.append(self._get_done(observation_dot, i))
            info_n.append(
                {"individual_reward": self._get_reward(observation_dot, action_n, i)}
            )

        # Cooperative case: every agent receives the summed team reward.
        total = np.sum(reward_n)
        if self.shared_reward:
            reward_n = [[total]] * self.n

        return obs_n, reward_n, done_n, info_n

    def reset(self):
        """Full episode reset.

        Restarts the simulation container, reconnects via rpyc, resets the
        agents, rebuilds the lightweight MPE agent shells and returns the
        initial per-agent observations.
        """
        self.current_step = 0
        # Restart the containerised simulation and re-establish the link.
        self.env_manager.reset()
        self.env_client = connect_loop(self.env_manager.get_server_port())
        self.env_client.take_action([EnvCmd.make_simulation("SPEED", "", self.speed)])
        self._reset()  # reset the RL/scripted agents themselves
        # Rebuild the lightweight MPE agent shells.
        self.agents = []
        for idx in range(self.num_agents):
            shell = Agent()
            shell.name = "agent %d" % idx
            shell.collide = True
            shell.silent = True
            shell.size = 0.15
            self.agents.append(shell)
        self.reset_88()
        observation = self._get_observation()
        self.test_obs = np.zeros(8)
        return [self._get_obs(observation) for _ in self.agents]

    # get info used for benchmarking
    def _get_info(self, agent):
        # NOTE(review): neither self.info_callback nor self.world is
        # assigned anywhere in this file — calling this would raise
        # AttributeError. Presumably dead code inherited from the MPE env;
        # confirm before relying on it.
        if self.info_callback is None:
            return {}
        return self.info_callback(agent, self.world)

    # get observation for a particular agent
    def _get_obs(self, observation_dot):
        """Flatten a raw observation dict via dict_to_obs (same vector for all agents)."""
        return self.dict_to_obs(observation_dot)

    # get dones for a particular agent
    # unused right now -- agents are allowed to go beyond the viewing screen
    def _get_done(self, obs, id):
        """Per-agent termination flag; always False in this environment.

        (A distance-based termination sketch existed here but was disabled.)
        """
        return np.array(False)

    def is_point_in_triangle(self, px, py, x1, y1, x2, y2, x3, y3):
        """Return True when (px, py) lies inside (or on the boundary of)
        the triangle (x1, y1)-(x2, y2)-(x3, y3).

        BUGFIX: the previous version compared cross products of the wrong
        vector pairs, so genuinely interior points (e.g. (0.2, 0.2) inside
        the triangle (0,0), (1,0), (0,1)) were reported as outside.  This
        uses the standard same-side test: the point is inside iff the edge
        cross products do not have mixed signs, which also handles both
        triangle winding orders and degenerate (collinear) triangles.
        """
        # Signed area (cross product) of each edge with the point.
        d1 = (px - x2) * (y1 - y2) - (x1 - x2) * (py - y2)
        d2 = (px - x3) * (y2 - y3) - (x2 - x3) * (py - y3)
        d3 = (px - x1) * (y3 - y1) - (x3 - x1) * (py - y1)

        has_neg = (d1 < 0) or (d2 < 0) or (d3 < 0)
        has_pos = (d1 > 0) or (d2 > 0) or (d3 > 0)

        # Mixed signs mean the point is strictly outside at least one edge.
        return not (has_neg and has_pos)

    def reward_if_surrounded(self, target, points, reward):
        """Return ``reward`` when ``target`` (x, y) lies inside the triangle
        formed by the three ``points``; otherwise 0."""
        (ax, ay), (bx, by), (cx, cy) = points
        tx, ty = target
        inside = self.is_point_in_triangle(tx, ty, ax, ay, bx, by, cx, cy)
        return reward if inside else 0

    # get reward for a particular agent
    def _get_reward(self, obs, action_n, id):
        """Reward for agent ``id``: negative distance to the enemy, plus
        encirclement bonuses.

        - Base reward: minus the distance between agent ``id`` and the
          enemy position stored in the last 4-slot block (obs[-4], obs[-3]).
        - +50 when the agents' current positions enclose the enemy and the
          agent is within 0.5 of it.
        - +10 when the agents' *predicted* next positions (current position
          shifted by the chosen discrete move) enclose the enemy.

        NOTE(review): the triangle bonuses index obs[0..9] and
        agent_act[0..2], so this assumes exactly 3 (or at least 3)
        controllable agents — confirm num_agents matches.
        """
        obs = self.dict_to_obs(obs)
        distance = np.sqrt(
            (obs[4 * id] - obs[-4]) ** 2 + (obs[4 * id + 1] - obs[-3]) ** 2
        )
        reward = -distance
        if distance < 0.5:
            # Bonus when the three agents' current positions enclose the enemy.
            surr_reward = self.reward_if_surrounded(
                [obs[-4], obs[-3]],
                [[obs[0], obs[1]], [obs[4], obs[5]], [obs[8], obs[9]]],
                50,
            )
            reward += surr_reward
        # Displacement per discrete action: +x, +y, -x, -y, stay.
        trap_list = [[0.02, 0], [0, 0.02], [-0.02, 0], [0, -0.02], [0, 0]]
        agent_act = []
        for i in range(self.num_agents):
            actions = np.argmax(action_n[i])
            x1, y1 = (
                obs[i * 4] + trap_list[actions][0],
                obs[i * 4 + 1] + trap_list[actions][1],
            )
            agent_act.append([x1, y1])
        # Bonus when the predicted next positions enclose the enemy.
        surr_reward = self.reward_if_surrounded(
            [obs[-4], obs[-3]],
            [agent_act[0], agent_act[1], agent_act[2]],
            10,
        )
        reward += surr_reward
        return reward

    # set env action for a particular agent
    def _set_action(self, action, agent, action_space, time=None):
        """Decode a raw action vector into agent.action.u (movement) and
        agent.action.c (communication), consuming the vector left to right.

        Inherited from the MPE environment; the asserts at the end verify
        every element of the incoming action was consumed.
        """
        agent.action.u = np.zeros(self.dim_p)
        agent.action.c = np.zeros(self.dim_c)
        # process action
        if isinstance(action_space, MultiDiscrete):
            # Split the flat vector into one chunk per sub-space.
            act = []
            size = action_space.high - action_space.low + 1
            index = 0
            for s in size:
                act.append(action[index : (index + s)])
                index += s
            action = act
        else:
            action = [action]
        if agent.movable:
            # physical action
            if self.discrete_action_input:
                agent.action.u = np.zeros(self.dim_p)
                # process discrete action: 1/2 move along x, 3/4 along y
                if action[0] == 1:
                    agent.action.u[0] = -1.0
                if action[0] == 2:
                    agent.action.u[0] = +1.0
                if action[0] == 3:
                    agent.action.u[1] = -1.0
                if action[0] == 4:
                    agent.action.u[1] = +1.0
                d = self.dim_p
            else:
                if self.discrete_action_space:
                    # One-hot-ish encoding: slots 1/2 are -x/+x, 3/4 are -y/+y.
                    agent.action.u[0] += action[0][1] - action[0][2]
                    agent.action.u[1] += action[0][3] - action[0][4]
                    d = 5
                else:
                    if self.force_discrete_action:
                        # Snap the continuous vector to its argmax component.
                        p = np.argmax(action[0][0 : self.dim_p])
                        action[0][:] = 0.0
                        action[0][p] = 1.0
                    agent.action.u = action[0][0 : self.dim_p]
                    d = self.dim_p

            # Scale movement by the agent's acceleration (default 5.0).
            sensitivity = 5.0
            if agent.accel is not None:
                sensitivity = agent.accel
            agent.action.u *= sensitivity

            # Consume the physical part of the action vector: either slice
            # the first chunk in place or drop it entirely.
            if (not agent.silent) and (not isinstance(action_space, MultiDiscrete)):
                action[0] = action[0][d:]
            else:
                action = action[1:]

        if not agent.silent:
            # communication action
            if self.discrete_action_input:
                agent.action.c = np.zeros(self.dim_c)
                agent.action.c[action[0]] = 1.0
            else:
                agent.action.c = action[0]

            action = action[1:]

        # make sure we used all elements of action
        assert len(action) == 0

    # reset rendering assets
    def _reset_render(self):
        """Drop cached render geometry so it is rebuilt on the next render()."""
        self.render_geoms = self.render_geoms_xform = None

    def render(self, mode="human", close=False):
        if close:
            # close any existic renderers
            for i, viewer in enumerate(self.viewers):
                if viewer is not None:
                    viewer.close()
                self.viewers[i] = None
            return []

        if mode == "human":
            alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            message = ""
            for agent in self.world.agents:
                comm = []
                for other in self.world.agents:
                    if other is agent:
                        continue
                    if np.all(other.state.c == 0):
                        word = "_"
                    else:
                        word = alphabet[np.argmax(other.state.c)]
                    message += other.name + " to " + agent.name + ": " + word + "   "
            print(message)

        for i in range(len(self.viewers)):
            # create viewers (if necessary)

            if self.viewers[i] is None:
                # import rendering only if we need it (and don't import for headless machines)
                # from gym.envs.classic_control import rendering
                from . import rendering

                self.viewers[i] = rendering.Viewer(700, 700)

        # create rendering geometry
        if self.render_geoms is None:
            # import rendering only if we need it (and don't import for headless machines)
            # from gym.envs.classic_control import rendering
            from . import rendering

            self.render_geoms = []
            self.render_geoms_xform = []

            self.comm_geoms = []

            for entity in self.world.entities:
                geom = rendering.make_circle(entity.size)
                xform = rendering.Transform()

                entity_comm_geoms = []

                if "agent" in entity.name:
                    geom.set_color(*entity.color, alpha=0.5)

                    if not entity.silent:
                        dim_c = self.world.dim_c
                        # make circles to represent communication
                        for ci in range(dim_c):
                            comm = rendering.make_circle(entity.size / dim_c)
                            comm.set_color(1, 1, 1)
                            comm.add_attr(xform)
                            offset = rendering.Transform()
                            comm_size = entity.size / dim_c
                            offset.set_translation(
                                ci * comm_size * 2 - entity.size + comm_size, 0
                            )
                            comm.add_attr(offset)
                            entity_comm_geoms.append(comm)

                else:
                    geom.set_color(*entity.color)
                    if entity.channel is not None:
                        dim_c = self.world.dim_c
                        # make circles to represent communication
                        for ci in range(dim_c):
                            comm = rendering.make_circle(entity.size / dim_c)
                            comm.set_color(1, 1, 1)
                            comm.add_attr(xform)
                            offset = rendering.Transform()
                            comm_size = entity.size / dim_c
                            offset.set_translation(
                                ci * comm_size * 2 - entity.size + comm_size, 0
                            )
                            comm.add_attr(offset)
                            entity_comm_geoms.append(comm)
                geom.add_attr(xform)
                self.render_geoms.append(geom)
                self.render_geoms_xform.append(xform)
                self.comm_geoms.append(entity_comm_geoms)
            for wall in self.world.walls:
                corners = (
                    (wall.axis_pos - 0.5 * wall.width, wall.endpoints[0]),
                    (wall.axis_pos - 0.5 * wall.width, wall.endpoints[1]),
                    (wall.axis_pos + 0.5 * wall.width, wall.endpoints[1]),
                    (wall.axis_pos + 0.5 * wall.width, wall.endpoints[0]),
                )
                if wall.orient == "H":
                    corners = tuple(c[::-1] for c in corners)
                geom = rendering.make_polygon(corners)
                if wall.hard:
                    geom.set_color(*wall.color)
                else:
                    geom.set_color(*wall.color, alpha=0.5)
                self.render_geoms.append(geom)

            # add geoms to viewer
            # for viewer in self.viewers:
            #     viewer.geoms = []
            #     for geom in self.render_geoms:
            #         viewer.add_geom(geom)

            for viewer in self.viewers:
                viewer.geoms = []
                for geom in self.render_geoms:
                    viewer.add_geom(geom)
                for entity_comm_geoms in self.comm_geoms:
                    for geom in entity_comm_geoms:
                        viewer.add_geom(geom)

        results = []
        for i in range(len(self.viewers)):
            from . import rendering

            if self.shared_viewer:
                pos = np.zeros(self.world.dim_p)
            else:
                pos = self.agents[i].state.p_pos
            self.viewers[i].set_bounds(
                pos[0] - cam_range,
                pos[0] + cam_range,
                pos[1] - cam_range,
                pos[1] + cam_range,
            )
            # update geometry positions
            for e, entity in enumerate(self.world.entities):
                self.render_geoms_xform[e].set_translation(*entity.state.p_pos)
                if "agent" in entity.name:
                    self.render_geoms[e].set_color(*entity.color, alpha=0.5)

                    if not entity.silent:
                        for ci in range(self.world.dim_c):
                            color = 1 - entity.state.c[ci]
                            self.comm_geoms[e][ci].set_color(color, color, color)
                else:
                    self.render_geoms[e].set_color(*entity.color)
                    if entity.channel is not None:
                        for ci in range(self.world.dim_c):
                            color = 1 - entity.channel[ci]
                            self.comm_geoms[e][ci].set_color(color, color, color)

            # render to display or array
            results.append(self.viewers[i].render(return_rgb_array=mode == "rgb_array"))

        return results

    # create receptor field locations in local coordinate frame
    def _make_receptor_locations(self, agent):
        receptor_type = "polar"
        range_min = 0.05 * 2.0
        range_max = 1.00
        dx = []
        # circular receptive field
        if receptor_type == "polar":
            for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):
                for distance in np.linspace(range_min, range_max, 3):
                    dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))
            # add origin
            dx.append(np.array([0.0, 0.0]))
        # grid receptive field
        if receptor_type == "grid":
            for x in np.linspace(-range_max, +range_max, 5):
                for y in np.linspace(-range_max, +range_max, 5):
                    dx.append(np.array([x, y]))
        return dx


# Mapping from command name to the list of unit-type IDs allowed to execute it.
# (Translated from the original Chinese comments; the numeric IDs presumably
# correspond to UnitType in env.env_def — TODO confirm.)
# NOTE(review): several keys carry misspellings ("cancledetect", "linepatrol"
# casing, etc.) that appear to be part of the command protocol — do not "fix"
# the key strings without auditing every caller.
type4cmd = {
    # Combat aircraft
    "areapatrol": [11, 12, 13, 14, 15],
    "takeoffareapatrol": [11, 12, 13, 14, 15],
    "linepatrol": [11, 12, 13, 14, 15],
    "takeofflinepatrol": [11, 12, 13, 14, 15],
    "areahunt": [15],
    "takeoffareahunt": [15],
    "targethunt": [15],
    "takeofftargethunt": [15],
    "protect": [11],
    "takeoffprotect": [11],
    "airattack": [11],
    "returntobase": [11, 12, 13, 14, 15],
    # Ground air defense
    "Ground_Add_Target": [31],
    "Ground_Remove_Target": [31],
    "GroundRadar_Control": [31],
    "Ground_Set_Direction": [31],
    "Ground_Move_Deploy": [31],
    # "Ground_Deploy": [31],    # not open for use
    # Frigate
    "Ship_Move_Deploy": [21],
    "Ship_areapatrol": [21],
    "Ship_Add_Target": [21],
    "Ship_Remove_Target": [21],
    "Ship_Radar_Control": [21],
    # AWACS (airborne early-warning aircraft)
    "awcs_areapatrol": [12],
    "awcs_linepatrol": [12],
    "awcs_mode": [12],
    "awcs_radarcontrol": [12],
    "awcs_cancledetect": [12],
    # Jammer aircraft
    "area_disturb_patrol": [13],
    "line_disturb_patrol": [13],
    "set_disturb": [13],
    "close_disturb": [13],
    "stop_disturb": [13],
    # Unmanned reconnaissance aircraft (UAV)
    "uav_areapatrol": [14],
    "uav_linepatrol": [14],
    "uav_cancledetect": [14],
    # Ground radar
    "base_radarcontrol": [32],
}
