import json, copy, re, os, inspect, os
import time
from time import sleep

import numpy as np
from Utils.tensor_ops import my_view, repeat_at
from Common.base_env import RawObsArray
# from Environment.Common.actionset_v3 import digitsToStrAction
from Common.agent import Agent
from Common.cssim_env_wrapper import CssimEnv, ScenarioConfig
from .cython_env_func import tear_num_arr
from Common.action_set_encoder import digit2act_dictionary, AgentPropertyDefaults
from Common.action_set_encoder import decode_action_as_string, digitsToStrAction
from config import GlobalConfig
from Common.route_and_environment import load_ScenarioConfig

# file_path = "TaskConfig\\examples\\cssim\\cssim_25vs25.jsonc"
# key1 = "Environment.cssim.SubTasks.CssimAgentEnvConf.py->SubTaskConfig"
# key2 = "agent_list"

# def init_position_helper(x_max, x_min, y_max, y_min, total, this):
#     n_col = np.ceil(np.sqrt(np.abs(x_max-x_min) * total / np.abs(y_max-y_min)))
#     n_row = np.ceil(total / n_col)
#
#     which_row = this // n_col
#     which_col = this % n_col
#
#     x = x_min + (which_col/n_col)*(x_max-x_min)
#     y = y_min + (which_row/n_row)*(y_max-y_min)
#     return x, y

class CssimCommonFn(CssimEnv):
    def __init__(self, rank) -> None:
        """Initialize env-level bookkeeping; agents are (re)built in reset()."""
        super().__init__(rank)
        self.train_mode = None
        self.current_step = None
        self.key_obj = None
        self.num_agent_each_team = None
        self.n_team_agent = None
        self.episode = 0
        self.max_episode = None
        self.agents = []
        self.target_position = None
        # per-entity core observation vector length (make_obs short-circuits
        # on get_shape=True and only returns CORE_DIM)
        self.observation_space = self.make_obs(get_shape=True)
        self.SubTaskConfig = GlobalConfig.ScenarioConfig.SubTaskConfig
        # when True the env yields one reward per team instead of per agent
        self.Enable_Reward_Sharing = getattr(
            GlobalConfig.ScenarioConfig, 'Enable_Reward_Sharing', False)
        self.AgentSettingArray = []
        self.AgentSettingAll = []
        self.uid_to_agent_dict = {}
        self.SubTaskConfig.agent_list = self.AgentSettingArray.copy()

    # 2025/07/02：UE端加载想定，UE发送给Python实体的属性列表，Python按照这个列表生成Agent
    # def reset(self):
    #     """
    #         Reset function, it delivers reset command to unreal engine to spawn all agents
    #         环境复位,每个episode的开始会执行一次此函数中会初始化所有智能体
    #     """
    #     super().reset()
    #     self.t = 0
    #     pos_ro = np.random.rand() * 2 * np.pi
    #     # spawn agents
    #     AgentSettingArray = []
    #
    #     # count the number of agent in each team
    #     n_team_agent = {}
    #     for i, agent_info in enumerate(self.SubTaskConfig.agent_list):
    #         team = agent_info['team']
    #         if team not in n_team_agent: n_team_agent[team] = 0
    #         self.SubTaskConfig.agent_list[i]['uid'] = i
    #         self.SubTaskConfig.agent_list[i]['tid'] = n_team_agent[team]
    #         n_team_agent[team] += 1
    #
    #     self.n_team_agent = n_team_agent
    #     # push agent init info one by one
    #     for i, agent_info in enumerate(self.SubTaskConfig.agent_list):
    #         team = agent_info['team']
    #         agent_info['n_team_agent'] = n_team_agent[team]
    #         init_fn = getattr(self, agent_info['init_fn_name'])
    #         AgentSettingArray.append(init_fn(agent_info, pos_ro))
    #
    #     self.agents = [Agent(team=a['team'], team_id=a['tid'], uid=a['uid']) for a in self.SubTaskConfig.agent_list]
    #
    #     # refer to struct.cpp, FParsedDataInput
    #     resp = self.client.send_and_wait_reply(json.dumps({
    #         'valid': True,
    #         'DataCmd': 'reset',
    #         'NumAgents': len(self.SubTaskConfig.agent_list),
    #         'AgentSettingArray': AgentSettingArray,  # refer to struct.cpp, FAgentProperty
    #         'TimeStepMax': ScenarioConfig.MaxStepsPerEpisode,
    #         'TimeDilation': ScenarioConfig.TimeDilation,
    #         'FrameRate': ScenarioConfig.FrameRate,
    #         'TimeStep': 0,
    #         'Actions': None,
    #     }))
    #
    #     resp = json.loads(resp)
    #     print(len(resp['dataArr']))
    #     print(len(AgentSettingArray))
    #
    #     assert len(resp['dataArr']) == len(AgentSettingArray), "Illegal agent initial position. 非法的智能体初始化位置，一部分智能体没有生成."
    #
    #     # 添加设置目标点位置，设置最大局数  2024/09
    #     assert 'dataGlobal' in resp or 'rSVD1' in resp['dataGloble'], (f"The key dataGloble or rSVD1 dose not exist "
    #                                                                    f"in dictionary resp")
    #     # 从UE返回的态势数据中读取比赛模式中设置的最大局数和任务终点的坐标
    #     parts = resp['dataGlobal']['rSVD1'].split(';')
    #     episode_part = parts[0]
    #
    #     self.max_episode = GlobalConfig.max_episode
    #     # 如果是渲染模式，从渲染端读取设置的最大模拟局数
    #     if GlobalConfig.ScenarioConfig.render:
    #         self.max_episode = int(episode_part.split(':')[1])
    #         # setattr(GlobalConfig, 'max_episode', self.max_episode)
    #
    #     target_part = parts[1][len('EndPointLoc:'):]
    #     coordinates = target_part.split()
    #     x, y, z = None, None, None
    #     for coordinate in coordinates:
    #         key, value = coordinate.split('=')
    #         if key == 'X':
    #             x = float(value)
    #         elif key == 'Y':
    #             y = float(value)
    #         elif key == 'Z':
    #             z = float(value)
    #     self.target_position = np.array([x, y, z])
    #
    #     # if self.episode:
    #     #     print('episode: %d' % (self.episode + 1))
    #     #     self.episode += 1
    #     print('episode: %d' % (self.episode + 1))
    #     self.episode += 1
    #     return self.parse_response_ob_info(resp)

    def reset(self):
        """
        Reset the environment at the start of each episode.

        Delivers reset commands to the unreal engine: first 'reset_property'
        to obtain the entity property list (2025/07/02: the UE side loads the
        scenario and sends the property list, and python generates the Agent
        objects from it), then 'reset_obs' to fetch the initial observation.

        Returns:
            tuple: (obs, info) as produced by parse_response_ob_info.
        """
        super().reset()
        self.t = 0
        # random rotation offset applied to initial positions by init_* helpers
        pos_ro = np.random.rand() * 2 * np.pi

        # phase 1: send base info and receive the UE-side property list
        resp = self.client.send_and_wait_reply(json.dumps({
            'valid': True,
            'DataCmd': 'reset_property',
            'NumAgents': 0,
            'TimeStepMax': ScenarioConfig.MaxStepsPerEpisode,
            'TimeDilation': ScenarioConfig.TimeDilation,
            'FrameRate': ScenarioConfig.FrameRate,
            'TimeStep': 0,
            'Actions': None,
        }))
        resp = json.loads(resp)

        # count the number of agents in each team and assign uid / tid
        n_team_agent = {}
        self.uid_to_agent_dict = {}
        agent_property_list = resp['propertyArr']
        # 2025/07 keep ALL properties, including commanders and neutral units
        self.AgentSettingAll = resp['propertyArr'].copy()
        index = 0
        self.AgentSettingArray.clear()

        for i, agent_info in enumerate(agent_property_list):
            # commanders are not RL-controlled entities
            if agent_info['type'] == 'BaseCommander_C':
                continue

            team = agent_info['agentTeam']
            # only teams 0 and 1 take part in the RL loop
            if team != 0 and team != 1:
                continue

            if team not in n_team_agent: n_team_agent[team] = 0
            self.AgentSettingArray.append(agent_info)

            self.AgentSettingArray[index]['uid'] = agent_info['uId']
            self.AgentSettingArray[index]['team'] = team
            self.AgentSettingArray[index]['tid'] = n_team_agent[team]
            self.AgentSettingArray[index]['type'] = agent_info['type']
            self.AgentSettingArray[index]['n_team_agent'] = n_team_agent[team]

            n_team_agent[team] += 1
            index += 1

        self.n_team_agent = n_team_agent
        # 2025/07 number of agents per team, indexed by team id (assumes team
        # ids are 0..n_teams-1)
        self.num_agent_each_team = [self.n_team_agent[i] for i in range(len(self.n_team_agent))]

        self.AgentSettingArray.sort(key=lambda x: (x['team'], x['tid']))  # 2025/07 sort by 'team', then 'tid'
        self.SubTaskConfig.agent_list = self.AgentSettingArray.copy()
        GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list = self.AgentSettingArray.copy()

        # update the agent count and rebuild the python-side Agent objects
        self.n_agents = len(self.AgentSettingArray)
        self.agents.clear()
        self.agents = [Agent(team=a['team'], team_id=a['tid'], uid=a['uid'],
                             location=(a['initLocation']['x'], a['initLocation']['y'], a['initLocation']['z']),
                             rotation=(a['initRotator']['pitch'], a['initRotator']['roll'], a['initRotator']['yaw']),
                             velocity=(a['initVelocity']['x'], a['initVelocity']['y'], a['initVelocity']['z']),
                             health=a['agentHp'], yaw=a['initRotator']['yaw'], speed=a['maxMoveSpeed'])
                       for a in self.AgentSettingArray]

        # phase 2: request the initial observation
        # refer to struct.cpp, FParsedDataInput
        resp = self.client.send_and_wait_reply(json.dumps({
            'valid': True,
            'DataCmd': 'reset_obs',
            'NumAgents': 0,
            'TimeStepMax': ScenarioConfig.MaxStepsPerEpisode,
            'TimeDilation': ScenarioConfig.TimeDilation,
            'FrameRate': ScenarioConfig.FrameRate,
            'TimeStep': 0,
            'Actions': None,
        }))
        resp = json.loads(resp)
        assert len(resp['dataArr']) == len(self.AgentSettingArray), "Illegal agent initial position. 非法的智能体初始化位置，一部分智能体没有生成."

        # re-order the observation entries to match self.AgentSettingArray
        target_uids = [agent['uid'] for agent in self.AgentSettingArray]
        obs_dict = {item['uId']: item for item in resp['dataArr']}
        sorted_obs = [obs_dict[uid] for uid in target_uids]
        resp['dataArr'] = sorted_obs

        # read the configured max episode count and train mode  2024/09
        # bugfix: was `'dataGlobal' in resp or 'rSVD1' in resp['dataGloble']`,
        # which raised KeyError on the misspelled key when 'dataGlobal' was
        # missing, and never validated 'rSVD1' when it was present
        assert 'dataGlobal' in resp and 'rSVD1' in resp['dataGlobal'], (
            "The key dataGlobal or rSVD1 does not exist in dictionary resp")
        config_str = resp['dataGlobal']['rSVD1']
        episode, train_mode = config_str.split(';')
        self.max_episode = int(episode)
        self.train_mode = bool(int(train_mode))

        print('episode: %d' % (self.episode + 1))
        self.episode += 1

        return self.parse_response_ob_info(resp)

    def step(self, act):
        """
        Advance the simulation by one decision step.

        Args:
            act: per-agent actions, one entry per agent (len == n_agents).

        Returns:
            (ob, RewardForAllTeams, done, info) on success, or None when the
            engine reply could not be received/decoded.
        """
        assert len(act) == self.n_agents

        # translate actions to the format recognized by unreal engine
        if self.SubTaskConfig.ActionFormat == 'Single-Digit':
            act_send = [digit2act_dictionary[a] for a in act]
        elif self.SubTaskConfig.ActionFormat == 'Multi-Digit':
            act_send = [decode_action_as_string(a) for a in act]
        else:
            # 'ASCII' and any unknown format share the same encoder
            # (the original had two identical branches here)
            act_send = [digitsToStrAction(a) for a in act]

        act_send = self.action_adaption(act_send)

        # simulation engine IO
        # send data fields: {'valid', 'DataCmd', 'NumAgents', 'AgentSettingArray',
        # 'TimeStepMax', 'TimeDilation', 'FrameRate', 'TimeStep', 'Actions',
        # 'StringActions', 'RSVD1'}
        try:
            resp = json.loads(self.client.send_and_wait_reply(json.dumps({
                'valid': True,
                'DataCmd': 'step',
                'TimeStep': self.t,
                'Actions': None,
                'StringActions': act_send,
                'RSVD1': 'None'            # Add 2025/05/29
            })))
        except Exception:
            # best-effort: a missing/undecodable reply (e.g. TypeError from a
            # None payload) is reported to the caller as None, not a crash.
            # The original had two except clauses both returning None.
            return None

        # 2025/07 sort resp['dataArr'] to match the order of self.AgentSettingArray
        target_uids = [agent['uid'] for agent in self.AgentSettingArray]
        obs_dict = {item['uId']: item for item in resp['dataArr']}
        sorted_obs = [obs_dict[uid] for uid in target_uids]
        resp['dataArr'] = sorted_obs

        self.current_step = resp['dataGlobal']['timeCnt']

        # get obs for RL, info for script AI
        ob, info = self.parse_response_ob_info(resp)

        # generate reward, get the episode ending information
        RewardForAllTeams, WinningResult = self.reward_and_done(resp)
        if WinningResult is not None:
            info.update(WinningResult)
            assert resp['dataGlobal']['episodeDone']
            done = True
        else:
            done = False

        if resp['dataGlobal']['timeCnt'] >= ScenarioConfig.MaxStepsPerEpisode:
            assert done
        # if self.rank == 0 and ScenarioConfig.js_render: self.simple_render_with_threejs()  # 2024/09
        return (ob, RewardForAllTeams, done, info)

    def action_adaption(self, action_list):

        agent_uid = np.nan
        for i, action in enumerate(action_list):
            agent_uid = self.agents[i].uid
            main_cmd, *rest = action.split(';')
            if main_cmd.split('::')[1] == 'NormalAttacking':
                team, tid = rest
                # target_agent = None
                for j, agent in enumerate(self.agents):
                    if agent.team == int(team) and agent.team_id == int(tid):
                        action_temp = 'ActionSet2::NormalAttacking;'+ str(agent.uid) + ';' +str(agent_uid)
                        action_list[i] = action_temp
            else:
                action_list[i] =  action + ';'+ str(agent_uid)
        return action_list

    def parse_event(self, event):
        """
            解析环境返回的一些关键事件,
            如智能体阵亡,某队伍胜利等等。
            关键事件需要在ue中进行定义.
            该设计极大地简化了python端奖励的设计流程,
            减小了python端的运算量。
        """
        if not hasattr(self, 'pattern'): self.pattern = re.compile(r'<([^<>]*)>([^<>]*)')
        return {k: v for k, v in re.findall(self.pattern, event)}

    def extract_key_gameobj(self, resp):
        """Return the non-agent simulation objects (e.g. key landmarks)."""
        return resp['dataGlobal']['keyObjArr']

    def reward_and_done(self, resp):
        """
            奖励的设计在此定义,
            建议:在UE端定义触发奖励的事件,如智能体阵亡、战术目标完成等,见parse_event
        """
        reward = np.zeros(shape=(self.n_teams, self.n_agents))
        if self.Enable_Reward_Sharing:
            reward = np.zeros(shape=self.n_teams)
        WinningResult = {
            "team_ranking": [-1, -1],
            "end_reason": ''
        }
        return reward, WinningResult

    def step_skip(self):
        """
            跳过一次决策,无用的函数
        """
        return self.client.send_and_wait_reply(json.dumps({
            'valid': True,
            'DataCmd': 'skip_frame',
        }))

    def find_agent_by_uid(self, uid):
        """
            用uid查找智能体(带缓存加速机制)
        """

        # self.uid_to_agent_dict = {}
        self.uid_to_agent_dict.update({agent.uid: agent for agent in self.agents})
        if isinstance(uid, str):
            self.uid_to_agent_dict.update({str(agent.uid): agent for agent in self.agents})

        # if not hasattr(self, 'uid_to_agent_dict'):
        #     self.uid_to_agent_dict = {}
        #     self.uid_to_agent_dict.update({agent.uid: agent for agent in self.agents})
        #     if isinstance(uid, str):
        #         self.uid_to_agent_dict.update({str(agent.uid): agent for agent in self.agents})
        return self.uid_to_agent_dict[uid]

    def parse_response_ob_info(self, resp):
        """
        Coarsely parse the per-agent observations returned by the engine.

        Dead agents get their position replaced with inf (infinitely far),
        and dict-form vectors such as agentLocation are converted to the more
        compact (x, y, z) tuple form. Also syncs the parsed attributes into
        the python-side Agent objects and updates SubTaskConfig entity counts.

        Args:
            resp: decoded JSON reply from the engine; mutated in place.

        Returns:
            (obs, info_dict): obs from make_obs, and the (mutated) resp dict
            enriched with agent-property / key-object / train-mode entries.
        """
        assert resp['valid']
        # flat distance array -> (n_agents, n_agents) matrix
        resp['dataGlobal']['distanceMat'] = np.array(resp['dataGlobal']['distanceMat']['flat_arr']).reshape(
            self.n_agents, self.n_agents)

        # if len(resp['dataGlobal']['events']) > 0:
        #     tmp = [kv.split('>') for kv in resp['dataGlobal']['events'][0].split('<') if kv]
        #     info_parse = {t[0]: t[1] for t in tmp}

        info_dict = resp
        for info in info_dict['dataArr']:
            alive = info['agentAlive']

            if alive:
                # replace dict-form vectors with plain tuples
                agentLocation = info.pop('agentLocation')
                agentRotation = info.pop('agentRotation')
                agentVelocity = info.pop('agentVelocity')
                agentScale = info.pop('agentScale')
                info['agentLocationArr'] = (agentLocation['x'], agentLocation['y'], agentLocation['z'])
                info['agentVelocityArr'] = (agentVelocity['x'], agentVelocity['y'], agentVelocity['z'])
                info['agentRotationArr'] = (agentRotation['yaw'], agentRotation['pitch'], agentRotation['roll'])
                info['agentScaleArr'] = (agentScale['x'], agentScale['y'], agentScale['z'])
                # drop fields the RL side does not consume
                info.pop('previousAction')
                info.pop('availActions')
                # info.pop('rSVD1')
                info.pop('interaction')
            else:
                # dead agents are pushed to infinity
                inf = float('inf')
                info['agentLocationArr'] = (inf, inf, inf)
                info['agentVelocityArr'] = (inf, inf, inf)
                info['agentRotationArr'] = (inf, inf, inf)

        # sync parsed attrs into the Agent objects; assumes resp['dataArr']
        # is already ordered like self.agents (reset/step sort it by uid)
        info = resp['dataArr']
        for i, agent_info in enumerate(info):
            self.agents[i].update_agent_attrs(agent_info)

        self.key_obj = self.extract_key_gameobj(resp)

        # 2025/07 set CssimAgentEnvConf.py obs_n_entity according to the agent property list (2025/08/12)
        GlobalConfig.ScenarioConfig.SubTaskConfig.obs_n_entity = len(self.agents) + len(self.key_obj)
        GlobalConfig.ScenarioConfig.SubTaskConfig.keyobj_n_entity = len(self.key_obj)

        # return ob, info
        info_dict.update({"AgentProperty": self.AgentSettingArray})  # 2025/07 expose AgentSettingArray for ScenarioConfig.SubTaskConfig.agent_list
        info_dict.update({"AgentPropertyAll": self.AgentSettingAll})  # 2025/07 expose AgentSettingAll for ScenarioConfig.SubTaskConfig.agent_list_all
        info_dict.update({"KeyObjNumber": len(self.key_obj)})  # 2025/07 expose KeyObjNumber for ScenarioConfig.SubTaskConfig.keyobj_n_entity
        info_dict.update({"TrainMode": self.train_mode})  # 2025/07 expose train_mode for GlobalConfig.train_mode

        return self.make_obs(resp), info_dict

    @staticmethod
    def item_random_mv(src, dst, prob, rand=False):
        """
            可能想是实现算子的随机移动
        """
        assert len(src.shape) == 1;
        assert len(dst.shape) == 1
        if rand: np.random.shuffle(src)
        len_src = len(src)
        n_mv = (np.random.rand(len_src) < prob).sum()
        item_mv = src[range(len_src - n_mv, len_src)]
        src = src[range(0, 0 + len_src - n_mv)]
        dst = np.concatenate((item_mv, dst))
        return src, dst

    @staticmethod
    def get_binary_array(n_int, n_bits=8, dtype=np.float32):
        """
            工具类，给定一个整数，转换后得到一个8位二进制的数组
        """
        arr = np.zeros((*n_int.shape, n_bits), dtype=dtype)
        for i in range(n_bits):
            arr[:, i] = (n_int % 2 == 1).astype(int)
            n_int = n_int / 2
            n_int = n_int.astype(np.int8)
        return arr

    def make_obs(self, resp=None, get_shape=False):
        """
        Build the observation tensor for all agents.

        With get_shape=True, only the per-entity core vector length
        (CORE_DIM) is returned. Otherwise returns an array of shape
        (n_agents, MAX_NUM_ALL_OBS + MAX_NUM_OPP_OBS + key objects, CORE_DIM):
        for each agent, the nearest visible allies (self first), the nearest
        visible enemies, then the key game objects.
        """
        # CORE_DIM = 38
        CORE_DIM = 23
        assert ScenarioConfig.obs_vec_length == CORE_DIM
        if get_shape:
            return CORE_DIM

        # temporary parameters
        OBS_RANGE_PYTHON_SIDE = 8000
        n_key_obj = self.SubTaskConfig.keyobj_n_entity

        # assert (self.SubTaskConfig.obs_n_entity -n_key_obj) % 2 == 0  # minus the key objects, ally and enemy slot counts are equal, 2024/08
        MAX_NUM_OPP_OBS = (self.SubTaskConfig.obs_n_entity-n_key_obj) // 2  # max number of enemy units in the obs, 2024/08
        MAX_NUM_ALL_OBS = (self.SubTaskConfig.obs_n_entity-n_key_obj) // 2  # max number of ally units in the obs, 2024/08

        # get and calculate distance array                                                            # distances between agents
        pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32)
        for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d
        # use the distance matrix calculated by unreal engine to accelerate
        # dis_mat = distance_matrix(pos3d_arr)    # dis_mat is a matrix, shape = (n_agent, n_agent)
        dis_mat = resp['dataGlobal']['distanceMat']
        alive_all = np.array([agent.alive for agent in self.agents])
        # dead agents are infinitely far from everyone
        dis_mat[~alive_all, :] = +np.inf
        dis_mat[:, ~alive_all] = +np.inf

        # get team list
        team_belonging = np.array([agent.team for agent in self.agents])

        # gather the obs arr of all known agents
        obs_arr = RawObsArray(key='Agent')

        if not hasattr(self, "uid_binary"):
            # 10-bit binary encoding of each agent index, computed once
            self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10)

        for i, agent in enumerate(self.agents):  # fill each agent's state into the situation
            assert agent.location is not None,("agent name:%s" % agent.uid)
            # assert agent.uid == i  # in the new version uids no longer start from 0

            obs_arr.append(
                self.uid_binary[i]  # 0~9
            )
            obs_arr.append([
                agent.index,  # 10
                agent.team,  # 11
                agent.alive,  # 12
                agent.uid_remote,  # 13
            ])
            obs_arr.append(  #[14,15,16,17,18,19]
                agent.pos3d
                # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0)
                # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38
            )
            obs_arr.append(
                agent.vel3d
            )
            obs_arr.append([
                agent.hp,
                agent.yaw,
                agent.max_speed,
            ])
        obs_ = obs_arr.get()
        # one CORE_DIM-long row per agent
        new_obs = my_view(obs_, [self.n_agents, -1])

        assert CORE_DIM == new_obs.shape[-1]
        OBS_ALL_AGENTS = np.zeros(shape=(
            self.n_agents,
            MAX_NUM_OPP_OBS + MAX_NUM_ALL_OBS,
            CORE_DIM
        ))

        # now arranging the individual obs
        for i, agent in enumerate(self.agents):
            if not agent.alive:
                OBS_ALL_AGENTS[i, :] = np.nan  # wipe the situation of dead agents
                continue

            # if alive
            # scope <all>
            dis2all = dis_mat[i, :]
            is_ally = (team_belonging == agent.team)

            # scope <opp/hostile>
            a2h_dis = dis2all[~is_ally]  # distances to enemy units
            h_alive = alive_all[~is_ally]  # whether each enemy is alive
            h_feature = new_obs[~is_ally]  # enemy rows of new_obs
            h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS]  # enemy indices sorted near->far, keep the first MAX_NUM_OPP_OBS
            a2h_dis_sorted = a2h_dis[h_iden_sort]  # enemy distances, sorted near->far, first MAX_NUM_OPP_OBS
            h_alive_sorted = h_alive[h_iden_sort]  # alive flags in the same near->far order
            h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted  # within the python-side obs range AND alive

            # scope <all>
            h_vis_index = h_iden_sort[h_vis_mask]  # indices of enemies that are in range and alive
            h_invis_index = h_iden_sort[~h_vis_mask]  # indices of enemies that are out of range or dead
            h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index, prob=0,
                                                             rand=True)  # roughly: randomly shuffle the visible, alive enemies
            h_ind = np.concatenate((h_vis_index, h_invis_index))  # visible indices followed by invisible ones
            h_msk = np.concatenate(
                (h_vis_index < 0, h_invis_index >= 0))  # "<0" project to False; ">=0" project to True; marks the invisible entries
            a2h_feature_sort = h_feature[h_ind]  # obs rows selected by h_ind
            a2h_feature_sort[h_msk] = 0  # invisible units have their info zeroed; the array keeps visible-enemy info
            if len(a2h_feature_sort) < MAX_NUM_OPP_OBS:  # pad with zeros when fewer than MAX_NUM_OPP_OBS entries
                a2h_feature_sort = np.concatenate((
                    a2h_feature_sort,
                    # np.ones(shape=(MAX_NUM_OPP_OBS-len(a2h_feature_sort), CORE_DIM))+np.nan
                    np.zeros(shape=(MAX_NUM_OPP_OBS - len(a2h_feature_sort), CORE_DIM))
                ), axis=0)

            # scope <ally/friend>
            a2f_dis = dis2all[is_ally]  # distances to ally units (self included)
            f_alive = alive_all[is_ally]  # whether each ally is alive
            f_feature = new_obs[is_ally]  # ally rows of new_obs
            f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS]  # nearest MAX_NUM_ALL_OBS allies (self is nearest)
            a2f_dis_sorted = a2f_dis[f_iden_sort]  # ally distances, sorted near->far
            f_alive_sorted = f_alive[f_iden_sort]  # alive flags in the same order
            f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted  # in range AND alive

            # scope <all>
            f_vis_index = f_iden_sort[f_vis_mask]  # indices of visible, alive allies
            self_vis_index = f_vis_index[:1]  # seperate self and ally (self)
            f_vis_index = f_vis_index[1:]  # seperate self and ally (other allies)
            f_invis_index = f_iden_sort[~f_vis_mask]  # invisible allies
            f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index, prob=0, rand=True)
            f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index))  # order: self, visible allies, invisible allies
            f_msk = np.concatenate((self_vis_index < 0, f_vis_index < 0,
                                    f_invis_index >= 0))  # "<0" project to False; ">=0" project to True (marks invisible allies)
            self_ally_feature_sort = f_feature[f_ind]  # obs of (self, visible, invisible) allies, MAX_NUM_ALL_OBS entries max
            self_ally_feature_sort[f_msk] = 0  # invisible ally info zeroed
            if len(self_ally_feature_sort) < MAX_NUM_ALL_OBS:  # pad with zeros when fewer than MAX_NUM_ALL_OBS entries
                self_ally_feature_sort = np.concatenate((
                    self_ally_feature_sort,
                    # np.ones(shape=(MAX_NUM_ALL_OBS-len(self_ally_feature_sort), CORE_DIM))+np.nan
                    np.zeros(shape=(MAX_NUM_ALL_OBS - len(self_ally_feature_sort), CORE_DIM))
                ), axis=0)
            # concatenate ally info (self_ally_feature_sort) and enemy info (a2h_feature_sort) along dim 0
            OBS_ALL_AGENTS[i, :] = np.concatenate((self_ally_feature_sort, a2h_feature_sort), axis=0)

        # the last part of observation is the list of core game objects
        MAX_OBJ_NUM_ACCEPT = 1
        self.N_Obj = len(self.key_obj)

        # key-object uids start at this offset (engine-side convention)
        OBJ_UID_OFFSET = 32768

        obs_arr = RawObsArray(key='GameObj')

        for i, obj in enumerate(self.key_obj):  # key objects are appended to the situation too
            assert obj['uId'] - OBJ_UID_OFFSET == i
            obs_arr.append(
                -self.uid_binary[i]  # reverse uid binary, self.uid_binary[i]
            )
            obs_arr.append([
                obj['uId'] - OBJ_UID_OFFSET,  #agent.index,
                -1,  #agent.team,
                True,  #agent.alive,
                obj['uId'] - OBJ_UID_OFFSET,  #agent.uid_remote,
            ])
            # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0)
            obs_arr.append(
                [
                    obj['location']['x'], obj['location']['y'], obj['location']['z']  # agent.pos3d
                ]
                # tear_num_arr([
                #     obj['location']['x'], obj['location']['y'], obj['location']['z']  # agent.pos3d
                # ], 6, ScenarioConfig.ObsBreakBase, 0)
            )

            obs_arr.append([
                               obj['velocity']['x'], obj['velocity']['y'], obj['velocity']['z']  # agent.vel3d
                           ] +
                           [
                               -1,  # hp
                               obj['rotation']['yaw'],  # yaw
                               0,  # max_speed
                           ])
        OBS_GameObj = my_view(obs_arr.get(), [len(self.key_obj), -1])[:MAX_OBJ_NUM_ACCEPT, :]
        OBS_GameObj = repeat_at(OBS_GameObj, insert_dim=0, n_times=self.n_agents)
        OBS_ALL_AGENTS = np.concatenate((OBS_ALL_AGENTS, OBS_GameObj), axis=1)

        return OBS_ALL_AGENTS

    # def simple_render_with_threejs(self):
    #     if self.rank != 0: return
    #     if not hasattr(self, 'threejs_bridge'):
    #         from VISUALIZE.mcom import mcom
    #         self.threejs_bridge = mcom(path='TEMP/v2d_logger/', digit=8, rapid_flush=False, draw_mode='Threejs')
    #         self.threejs_bridge.v2d_init()
    #         self.threejs_bridge.set_style('star')
    #         # self.threejs_bridge.set_style('grid')
    #         # self.threejs_bridge.set_style('grid3d')
    #         self.threejs_bridge.set_style('font', fontPath='/examples/fonts/ttf/FZYTK.TTF',
    #                                       fontLineHeight=1500)  # 注意不可以省略参数键值'fontPath=','fontLineHeight=' ！！！
    #         # self.threejs_bridge.set_style('gray')
    #
    #         self.threejs_bridge.time_cnt = 0
    #         self.threejs_bridge.geometry_rotate_scale_translate('box', 0, 0, 0, 3, 2, 1, 0, 0, 0)
    #         self.threejs_bridge.geometry_rotate_scale_translate('cone', 0, np.pi / 2, 0, 1.2, 0.9, 0.9, 1.5, 0,
    #                                                             0.5)  # x -> y -> z
    #         self.threejs_bridge.advanced_geometry_rotate_scale_translate('tower2', 'BoxGeometry(1,1,1)', 0, 0, 0, 0, 0,
    #                                                                      5, 0, 0, -4)  # 长方体
    #         self.threejs_bridge.advanced_geometry_rotate_scale_translate('ball', 'SphereGeometry(1)', 0, 0, 0, 1, 1, 1,
    #                                                                      0, 0, 0)  # 球体
    #
    #     for i, agent in enumerate(self.agents):
    #         if not agent.alive:
    #             color = 'black'
    #         else:
    #             if agent.team == 0: color = 'green'
    #             if agent.team == 1: color = 'blue'
    #         self.threejs_bridge.v2dx(
    #             'ball|%d|%s|%.2f' % (i, color, 1),
    #             agent.pos3d[0] / 100,
    #             agent.pos3d[1] / 100,
    #             agent.pos3d[2] / 100,
    #             ro_x=0, ro_y=-0, ro_z=0, ro_order='ZYX',  # rotation
    #             label=f'uid-{agent.uid}', label_color='white',
    #             opacity=1,
    #             track_n_frame=50
    #         )
    #     self.threejs_bridge.v2d_show()

    def init_ground(self, agent_info, pos_ro):
        """Build the UE spawn-property dict for one ground agent.

        Agents are placed on an N_COL-wide grid, pushed 2000 units away from
        the map center (mirrored per team), and the whole formation is then
        rotated by ``pos_ro`` radians around the origin.

        Args:
            agent_info: dict with keys 'type' (UE class name), 'team',
                'n_team_agent', 'tid' (rank inside the team) and 'uid'
                (globally unique agent id).
            pos_ro: formation rotation angle in radians.

        Returns:
            dict: a deep copy of ``AgentPropertyDefaults`` updated with the
            per-agent spawn settings (class, team, weapon ranges, hp,
            initial location/rotation, ...).
        """
        N_COL = 4
        agent_class = agent_info['type']
        team = agent_info['team']
        n_team_agent = agent_info['n_team_agent']
        tid = agent_info['tid']
        uid = agent_info['uid']
        # Grid layout: 800-unit column spacing on x, 400-unit row spacing on
        # y; (-1)**(team+1) mirrors the two teams across the x axis.
        x = 0 + 800 * (tid - n_team_agent // 2) // N_COL
        y = (400 * (tid % N_COL) + 2000) * (-1) ** (team + 1)
        # Rotate the formation around the map origin by pos_ro radians.
        x, y = np.matmul(np.array([x, y]),
                         np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)]]))
        z = 500  # 500 is slightly above the ground
        yaw = 90 if team == 0 else -90  # teams face each other
        # The arena is assumed ~30000x30000 centered at the origin;
        # out-of-bounds spawns would be destroyed by the simulator.
        assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0
        agent_property = copy.deepcopy(AgentPropertyDefaults)
        agent_property.update({
            'DebugAgent': False,
            # max drive/fly speed
            'MaxMoveSpeed': 720 if agent_class == 'RLA_CAR_Laser' else 600,
            # also influences object mass, please change it with caution!
            'AgentScale': {'x': 0.5, 'y': 0.5, 'z': 0.5, },
            # probability of dodging (escaping) damage
            "DodgeProb": 0.0,
            # missile explosion damage
            "ExplodeDmg": 20,
            # team belonging
            'AgentTeam': team,
            # choose ue class to init
            'ClassName': agent_class,
            # Weapon CD
            'WeaponCD': 1,
            # open fire range
            "PerceptionRange": 2000 if agent_class == 'RLA_CAR_Laser' else 2500,
            "GuardRange": 1400 if agent_class == 'RLA_CAR_Laser' else 1700,
            "FireRange": 750 if agent_class == 'RLA_CAR_Laser' else 1400,
            # debugging: visual rings matching the three ranges above
            'RSVD1': '-Ring1=2000 -Ring2=1400 -Ring3=750' if agent_class == 'RLA_CAR_Laser' else '-Ring1=2500 -Ring2=1700 -Ring3=1400',
            # regular
            'RSVD2': '-InitAct=ActionSet2::Idle;AsFarAsPossible',
            # agent hp (randomized slightly per episode)
            'AgentHp': np.random.randint(low=95, high=105) if agent_class == 'RLA_CAR_Laser' else np.random.randint(
                low=145, high=155),
            # the rank of agent inside the team
            'IndexInTeam': tid,
            # the unique identity of this agent in simulation system
            'UID': uid,
            # show color
            'Color': '(R=0,G=1,B=0,A=1)' if team == 0 else '(R=0,G=0,B=1,A=1)',
            # initial location
            'InitLocation': {'x': x, 'y': y, 'z': z, },
            # initial facing direction et.al.
            'InitRotator': {'pitch': 0, 'roll': 0, 'yaw': yaw, },
        })  # fixed: original had a stray trailing comma, turning this statement into a discarded 1-tuple
        return agent_property

    def init_location(self, agent_info, pos_ro):
        """Build the UE spawn-property dict for an agent at an explicit position.

        Unlike ``init_ground``/``init_air``, the x/y/z spawn coordinates are
        read directly from ``agent_info`` and ``pos_ro`` is unused (kept for
        signature parity with the other init_* helpers).

        Args:
            agent_info: dict with keys 'type', 'team', 'tid', 'uid' and
                explicit 'x', 'y', 'z' spawn coordinates.
            pos_ro: unused here; accepted for interface consistency.

        Returns:
            dict: a deep copy of ``AgentPropertyDefaults`` updated with the
            per-agent spawn settings.
        """
        agent_class = agent_info['type']
        team = agent_info['team']
        tid = agent_info['tid']
        uid = agent_info['uid']

        x = agent_info['x']
        y = agent_info['y']
        z = agent_info['z']
        yaw = 90 if team == 0 else -90  # presumably the agent's initial heading
        # The arena is a 30000x30000 square centered at the origin;
        # out-of-bounds agents are destroyed. Bounds check intentionally
        # disabled here since positions come from the caller:
        # assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0
        agent_property = copy.deepcopy(AgentPropertyDefaults)
        agent_property.update({
            'DebugAgent': False,
            # max drive/fly speed
            'MaxMoveSpeed': 720 if agent_class == 'RLA_CAR_Laser' else 600,
            # also influences object mass, please change it with caution!
            'AgentScale': {'x': 0.8, 'y': 0.8, 'z': 0.8, },
            # probability of dodging (escaping) damage
            "DodgeProb": 0.0,
            # missile explosion damage
            "ExplodeDmg": 10,
            # team belonging
            'AgentTeam': team,
            # choose ue class to init
            'ClassName': agent_class,
            # Weapon CD
            'WeaponCD': 2,
            # open fire range
            "PerceptionRange": 5000 if agent_class == 'RLA_CAR_Laser' else 5500,
            "GuardRange": 2800 if agent_class == 'RLA_CAR_Laser' else 3400,
            "FireRange": 3500 if agent_class == 'RLA_CAR_Laser' else 4800,
            # debugging; NOTE(review): these ring radii do not match the
            # Perception/Guard/Fire ranges above — confirm whether intentional
            'RSVD1': '-Ring1=2000 -Ring2=1400 -Ring3=750' if agent_class == 'RLA_CAR_Laser' else '-Ring1=2500 -Ring2=1700 -Ring3=1400',
            # regular
            'RSVD2': '-InitAct=ActionSet2::Idle;AsFarAsPossible',
            # agent hp (fixed here, unlike the randomized init_ground variant)
            # 'AgentHp':np.random.randint(low=95,high=105) if agent_class == 'RLA_CAR_Laser' else np.random.randint(low=145,high=155),
            'AgentHp': 105 if agent_class == 'RLA_CAR_Laser' else 155,
            # the rank of agent inside the team
            'IndexInTeam': tid,
            # the unique identity of this agent in simulation system
            'UID': uid,
            "IsTeamReward": False,
            # show color
            'Color': '(R=1,G=0,B=0,A=1)' if team == 0 else '(R=0,G=0,B=1,A=1)',
            # initial location
            'InitLocation': {'x': x, 'y': y, 'z': z, },
            # initial facing direction et.al.
            'InitRotator': {'pitch': 0, 'roll': 0, 'yaw': yaw, },
        })  # fixed: original had a stray trailing comma, turning this statement into a discarded 1-tuple
        return agent_property

    def init_air(self, agent_info, pos_ro):
        """Build the UE spawn-property dict for one air agent.

        Air agents spawn in a single row (no y grid offset, unlike
        ``init_ground``) at altitude z=1000, with the formation rotated by
        ``pos_ro`` radians around the origin.

        Args:
            agent_info: dict with keys 'type', 'team', 'n_team_agent',
                'tid' (rank inside the team) and 'uid' (globally unique id).
            pos_ro: formation rotation angle in radians.

        Returns:
            dict: a deep copy of ``AgentPropertyDefaults`` updated with the
            per-agent spawn settings.
        """
        N_COL = 4
        agent_class = agent_info['type']
        team = agent_info['team']
        n_team_agent = agent_info['n_team_agent']
        tid = agent_info['tid']
        uid = agent_info['uid']

        # 800-unit column spacing on x; (-1)**(team+1) mirrors the teams
        # across the x axis at |y| = 2000.
        x = 0 + 800 * (tid - n_team_agent // 2) // N_COL
        y = 2000 * (-1) ** (team + 1)
        # Rotate the formation around the map origin by pos_ro radians.
        x, y = np.matmul(np.array([x, y]),
                         np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)]]))
        z = 1000  # flight altitude
        yaw = 90 if team == 0 else -90  # teams face each other
        # Spawn must stay inside the ~30000x30000 arena.
        assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0
        agent_property = copy.deepcopy(AgentPropertyDefaults)
        agent_property.update({
            'DebugAgent': False,
            # max drive/fly speed
            'MaxMoveSpeed': 900,
            # also influences object mass, please change it with caution!
            'AgentScale': {'x': 0.5, 'y': 0.5, 'z': 0.5, },
            # probability of dodging (escaping) damage
            "DodgeProb": 0.0,
            # missile explosion damage
            "ExplodeDmg": 10,
            # team belonging
            'AgentTeam': team,
            # choose ue class to init
            'ClassName': agent_class,
            # Weapon CD
            'WeaponCD': 3,
            # open fire range
            "PerceptionRange": 2500,
            "GuardRange": 1800,
            "FireRange": 1700,
            # debugging: visual rings matching the three ranges above
            'RSVD1': '-ring1=2500 -ring2=1800 -ring3=1700',
            # regular
            'RSVD2': '-InitAct=ActionSet2::Idle;StaticAlert',
            # agent hp
            'AgentHp': 50,
            # the rank of agent inside the team
            'IndexInTeam': tid,
            # the unique identity of this agent in simulation system
            'UID': uid,
            # show color
            'Color': '(R=0,G=1,B=0,A=1)' if team == 0 else '(R=0,G=0,B=1,A=1)',
            # initial location
            'InitLocation': {'x': x, 'y': y, 'z': z, },
            # initial facing direction et.al.
            'InitRotator': {'pitch': 0, 'roll': 0, 'yaw': yaw, },
        })  # fixed: original had a stray trailing comma, turning this statement into a discarded 1-tuple
        return agent_property
    # def init_air(self, agent_info, pos_ro):
    #     N_COL = 4
    #     agent_class = agent_info['type']
    #     team = agent_info['team']
    #     n_team_agent = agent_info['n_team_agent']
    #     tid = agent_info['tid']
    #     uid = agent_info['uid']
    #
    #     x = agent_info['x']
    #     y = agent_info['y']
    #     z = agent_info['z']  # 500 is slightly above the ground
    #     yaw = 90 if team == 0 else -90
    #     # assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0
    #     agent_property = copy.deepcopy(AgentPropertyDefaults)
    #     agent_property.update({
    #         'DebugAgent': False,
    #         # max drive/fly speed
    #         'MaxMoveSpeed': 900,
    #         # also influence object mass, please change it with causion!
    #         'AgentScale': {'x': 0.8, 'y': 0.8, 'z': 0.8, },
    #         # probability of escaping dmg 闪避
    #         "DodgeProb": 0.0,
    #         # ms explode dmg
    #         "ExplodeDmg": 10,
    #         # team belonging
    #         'AgentTeam': team,
    #         # choose ue class to init
    #         'ClassName': agent_class,
    #         # Weapon CD
    #         'WeaponCD': 3,
    #         # open fire range
    #         "PerceptionRange": 5000,
    #         "GuardRange": 3600,
    #         "FireRange": 3400,
    #         # debugging
    #         'RSVD1': '-ring1=2500 -ring2=1800 -ring3=1700',
    #         # regular
    #         'RSVD2': '-InitAct=ActionSet2::Idle;StaticAlert',
    #         # agent hp
    #         'AgentHp': 50,
    #         # the rank of agent inside the team
    #         'IndexInTeam': tid,
    #         # the unique identity of this agent in simulation system
    #         'UID': uid,
    #         "IsTeamReward": False,
    #         # show color
    #         'Color': '(R=1,G=0,B=0,A=1)' if team == 0 else '(R=0,G=0,B=1,A=1)',
    #         # initial location
    #         'InitLocation': {'x': x, 'y': y, 'z': z, },
    #         # initial facing direction et.al.
    #         'InitRotator': {'pitch': 0, 'roll': 0, 'yaw': yaw, },
    #     }),
    #     return agent_property

    # def init_air(self, agent_info, pos_ro):
    #     N_COL = 4
    #     agent_class = agent_info['type']
    #     team = agent_info['team']
    #     n_team_agent = agent_info['n_team_agent']
    #     tid = agent_info['tid']
    #     uid = agent_info['uid']
    #
    #     x = 0 + 800 * (tid - n_team_agent // 2) // N_COL
    #     y = 2000 * (-1) ** (team + 1)
    #     x, y = np.matmul(np.array([x, y]),np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)]]))
    #     z = 1000
    #     yaw = 90 if team==0 else -90
    #     # assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0
    #     agent_property = copy.deepcopy(AgentPropertyDefaults)
    #     agent_property.update({
    #             'DebugAgent': False,
    #             # max drive/fly speed
    #             'MaxMoveSpeed':  900,
    #             # also influence object mass, please change it with causion!
    #             'AgentScale'  : { 'x': 0.5,  'y': 0.5, 'z': 0.5, },
    #             # probability of escaping dmg 闪避
    #             "DodgeProb": 0.0,
    #             # ms explode dmg
    #             "ExplodeDmg": 10,
    #             # team belonging
    #             'AgentTeam': team,
    #             # choose ue class to init
    #             'ClassName': agent_class,
    #             # Weapon CD
    #             'WeaponCD': 3,
    #             # open fire range
    #             "PerceptionRange":  2500,
    #             "GuardRange":       1800,
    #             "FireRange":        1700,
    #             # debugging
    #             'RSVD1': '-ring1=2500 -ring2=1800 -ring3=1700',
    #             # regular
    #             'RSVD2': '-InitAct=ActionSet2::Idle;StaticAlert',
    #             # agent hp
    #             'AgentHp':50,
    #             # the rank of agent inside the team
    #             'IndexInTeam': tid,
    #             # the unique identity of this agent in simulation system
    #             'UID': uid,
    #             # show color
    #             'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)',
    #             # initial location
    #             'InitLocation': { 'x': x,  'y': y, 'z': z, },
    #             # initial facing direction et.al.
    #             'InitRotator': { 'pitch': 0,  'roll': 0, 'yaw': yaw, },
    #     }),
    #     return agent_property
