import time, traceback, os
import numpy as np
import random
from Utils.tensor_ops import my_view, __hash__, repeat_at, gather_righthand
from Common.action_set_encoder import encode_action_as_digits
from config import GlobalConfig
from collections import Counter

def str_array_to_num(str_arr):
    """Map each string in *str_arr* to a small integer id.

    Ids are assigned in first-appearance order: the first distinct string
    gets 0, the next new one gets 1, and equal strings share the same id.

    @param str_arr: iterable of strings (any hashable works)
    @return: list of integer ids, same length and order as the input
    """
    id_of = {}
    out_arr = []
    for s in str_arr:  # 's', not 'str': avoid shadowing the builtin
        out_arr.append(id_of.setdefault(s, len(id_of)))
    return out_arr


def itemgetter(*items):
    """Build a lenient getter in the spirit of ``operator.itemgetter``.

    Unlike the stdlib version, the returned callable always yields a
    tuple (even for a single key) and substitutes ``None`` for keys
    missing from the mapping instead of raising.
    """
    def g(obj):
        values = []
        for key in items:
            values.append(obj[key] if key in obj else None)
        return tuple(values)

    return g


class ActionConvertLegacy:
    """Action-code converter: maps the discrete action index chosen by the
    RL model onto the engine's agreed action encoding.
    """

    def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None:
        """Initialise the agent action space.

        @param SELF_TEAM_ASSUME: friendly team index (0 by default)
        @param OPP_TEAM_ASSUME: enemy team index (1 by default)
        @param OPP_NUM_ASSUME: number of enemy units
        """
        self.SELF_TEAM_ASSUME = SELF_TEAM_ASSUME
        self.OPP_TEAM_ASSUME = OPP_TEAM_ASSUME
        self.OPP_NUM_ASSUME = OPP_NUM_ASSUME
        # Entry layout:
        # (main_cmd, sub_cmd, points=None, x=None, y=None, z=None, UID=None, T=None, T_index=None)
        self.dictionary_args = [
            ('Idle', 'N/A',  None, None, None, None, None, None),   # index 0

            # Guard in the eight horizontal directions (indices 1-8)
            ('Guard', '+X', None, None, None, None, None, None),
            ('Guard', '+Y', None, None, None, None, None, None),
            ('Guard', '-X', None, None, None, None, None, None),
            ('Guard', '-Y', None, None, None, None, None, None),
            ('Guard', '+X+Y', None, None, None, None, None, None),
            ('Guard', '-X+Y', None, None, None, None, None, None),
            ('Guard', '-X-Y', None, None, None, None, None, None),
            ('Guard', '+X-Y', None, None, None, None, None, None),

            # horizontal movement (indices 9-16)
            ('Moving', '+X', None, None, None, None, None, None),
            ('Moving', '+Y', None, None, None, None, None, None),
            ('Moving', '-X', None, None, None, None, None, None),
            ('Moving', '-Y', None, None, None, None, None, None),
            ('Moving', '+X+Y', None, None, None, None, None, None),
            ('Moving', '-X+Y', None, None, None, None, None, None),
            ('Moving', '-X-Y', None, None, None, None, None, None),
            ('Moving', '+X-Y', None, None, None, None, None, None),

            # movement with ascending component (indices 17-24)
            ('Moving', '+X+Z', None, None, None, None, None, None),
            ('Moving', '+Y+Z', None, None, None, None, None, None),
            ('Moving', '-X+Z', None, None, None, None, None, None),
            ('Moving', '-Y+Z', None, None, None, None, None, None),
            ('Moving', '+X+Y+Z', None, None, None, None, None, None),
            ('Moving', '-X+Y+Z', None, None, None, None, None, None),
            ('Moving', '-X-Y+Z', None, None, None, None, None, None),
            ('Moving', '+X-Y+Z', None, None, None, None, None, None),

            # movement with descending component (indices 25-32)
            ('Moving', '+X-Z', None, None, None, None, None, None),
            ('Moving', '+Y-Z', None, None, None, None, None, None),
            ('Moving', '-X-Z', None, None, None, None, None, None),
            ('Moving', '-Y-Z', None, None, None, None, None, None),
            ('Moving', '+X+Y-Z', None, None, None, None, None, None),
            ('Moving', '-X+Y-Z', None, None, None, None, None, None),
            ('Moving', '-X-Y-Z', None, None, None, None, None, None),
            ('Moving', '+X-Y-Z', None, None, None, None, None, None),

            # purely vertical movement (indices 33-34)
            ('Moving', '+Z', None, None, None, None, None, None),
            ('Moving', '-Z', None, None, None, None, None, None),

            # grenade attacks (indices 35-37)
            ('GrenadeAttacking', '+X+Y', None, None, None, None, None, None),
            ('GrenadeAttacking', '-X+Y', None, None, None, None, None, None),
            ('GrenadeAttacking', '+Y', None, None, None, None, None, None)
        ]
        # One targeted-attack entry per assumed opponent; no-op when
        # OPP_NUM_ASSUME == 0 (range(0) is empty, no extra guard needed).
        for i in range(self.OPP_NUM_ASSUME):
            self.dictionary_args.append(('NormalAttacking', 'N/A', None, None, None, None, OPP_TEAM_ASSUME, i))

    def convert_act_arr(self, type, a):
        """Convert an action index to the engine's digit encoding, applying
        per-unit-type overrides (e.g. drones/vehicles must not throw grenades).

        @param type: unit type string of the agent (e.g. drone, soldier)
        @param a: index of the action in `dictionary_args`
        @return: digit-encoded action (see `encode_action_as_digits`)
        """
        if type == 'BP_RoboDog_C' or type == 'BP_MNWS_Vehicle_6x6UGV_C' or type == 'BP_Base_UAV_C':
            args = self.dictionary_args[a]
            if args[0] == 'GrenadeAttacking':
                # these platforms cannot throw grenades; fall back to Idle
                return encode_action_as_digits('Idle', 'N/A', None, None, None, None, None, None)
            return encode_action_as_digits(*args)
        else:
            return encode_action_as_digits(*self.dictionary_args[a])

    def get_tp_avail_act(self, type, uid):
        """Build the action-availability mask for one agent.

        Depending on the user configuration, this mask is used to correct
        the RL model's output and veto unreasonable actions.

        @param type: unit type string of the agent
        @param uid: container of opponent T_index values that are no longer alive
        @return avail_act: array of shape (n_actions,); 1 = allowed, 0 = forbidden
        """
        DISABLE = 0
        ENABLE = 1
        n_act = len(self.dictionary_args)
        ret = np.zeros(n_act) + ENABLE
        for i in range(n_act):
            args = self.dictionary_args[i]

            # robo-dogs, UGVs and UAVs never throw grenades
            if type == 'BP_RoboDog_C' or type == 'BP_MNWS_Vehicle_6x6UGV_C' or type == 'BP_Base_UAV_C':
                if args[0] == 'GrenadeAttacking':   ret[i] = DISABLE

            # ground platforms cannot move purely vertically
            if type == 'BP_RoboDog_C' or type == 'BP_MNWS_Vehicle_6x6UGV_C':
                if args[0] == 'Moving':
                    if args[1] == '+Z' or args[1] == '-Z':
                        ret[i] = DISABLE

            if type == 'BP_MNWS_Vehicle_6x6UGV_C':
                if args[0] == 'Guard':   ret[i] = DISABLE

            # never target an opponent that is already dead
            if (args[0] == 'NormalAttacking' or args[0] == 'GrenadeAttacking') and args[7] in uid: ret[i] = DISABLE
        return ret


class RLAlgorithmBase:
    """Base class shared by the team RL algorithms.

    Wires together action conversion (`ActionConvertLegacy`), action
    availability masking, the interaction glue with the environment
    runner, and trajectory bookkeeping.  Subclasses must implement
    `making_decision` and `save_model`.
    """

    def __init__(self, n_agent, n_thread, space, team=None):
        """
        @param n_agent: number of agents this algorithm controls
        @param n_thread: number of parallel environment threads
        @param space: dict holding 'act_space' and 'obs_space' entries
        @param team: team index (0 = red, 1 = blue)
        """
        # super().__init__(n_agent, n_thread, space, team)

        self.n_thread = n_thread
        self.n_agent = n_agent
        self.team = team
        self.act_space = space['act_space']
        self.obs_space = space['obs_space']
        self.ScenarioConfig = GlobalConfig.ScenarioConfig
        # self.mcv = mcv
        self.device = GlobalConfig.device

        self.avail_act = None              # per-thread/agent action availability mask
        self.opp_agent_not_alive = None    # per-thread record of dead opponents
        self._unfi_frag_ = None            # unfinished trajectory fragment awaiting reward
        self.trajectory_pool = None        # set by the subclass trainer

        # 2025/07: select the RL model log directory for the red / blue team
        if self.team == 0:
            self.logdir = GlobalConfig.logdir_red
        elif self.team == 1:
            self.logdir = GlobalConfig.logdir_blue
        else:
            print('[Save Model]: Error model path!')

        # 2025/07: agent counts of the red and blue teams are configured separately
        if self.team == 0:
            GlobalConfig.AlgorithmConfig_Red.num_agent = n_agent
        else:
            GlobalConfig.AlgorithmConfig_Blue.num_agent = n_agent

        self.Enable_Reward_Sharing = False
        if hasattr(GlobalConfig.ScenarioConfig, 'Enable_Reward_Sharing'):
            self.Enable_Reward_Sharing = GlobalConfig.ScenarioConfig.Enable_Reward_Sharing

        if self.ScenarioConfig.EntityOriented:
            self.rawob_dim = self.ScenarioConfig.obs_vec_length
        else:
            self.rawob_dim = space['obs_space']['obs_shape']

        if len(GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM) >= 2:
            self.action_converter = ActionConvertLegacy(
                SELF_TEAM_ASSUME=team,
                OPP_TEAM_ASSUME=(1 - team),
                OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1 - team]
            )
        else:
            # single-team scenario: no opponents, so no attack actions are generated
            self.action_converter = ActionConvertLegacy(
                SELF_TEAM_ASSUME=team,
                OPP_TEAM_ASSUME= 1,
                OPP_NUM_ASSUME=0
            )

        self.n_actions = len(self.action_converter.dictionary_args)

        # if not hasattr(self, 'agent_type'):
        #     self.agent_uid = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team]
        #     self.agent_type = [agent['type'] for agent in GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list if
        #                        agent['team'] == self.team]

        # debug budget: the availability asserts and data-hash checks below run
        # only while this counter is positive, then switch themselves off
        self.patience = 2000

    def set_algorithm_trainer(self):
        """Hook for subclasses to attach their trainer; no-op in the base class."""
        pass

    def interact_with_env(self, StateRecall):
        """One interaction step: turn the latest observations into a list of
        engine action tuples, one per thread and agent.

        @param StateRecall: dict supplied by the task runner; keys read here
            include 'Latest-Obs', 'ENV-PAUSE', 'Env-Suffered-Reset',
            'Test-Flag', 'Latest-Team-Info', '_EpRsn_' and the
            'opp-agent-not-alive_team*' entries.
        @return: (actions_list, StateRecall), where actions_list is indexed
            [thread][agent] and StateRecall may gain a '_hook_' callback used
            to feed back reward/done information.
        """

        # lazily cache the uid/type of the agents belonging to this team
        if not hasattr(self, 'agent_type'):
            self.agent_uid = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team]
            self.agent_type = [agent['type'] for agent in GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list if
                               agent['team'] == self.team]

        if self.team == 0:
            self.opp_agent_not_alive = StateRecall['opp-agent-not-alive_team1']
        else:
            self.opp_agent_not_alive = StateRecall['opp-agent-not-alive_team0']

        # 2025/07: red and blue teams are handled separately
        if self.team == 0 and GlobalConfig.AlgorithmConfig_Red.action_filter_enabled:
            self.avail_act = np.array([np.stack(
                tuple(self.action_converter.get_tp_avail_act(tp, self.opp_agent_not_alive[index]) for tp in
                      self.agent_type)) for index in range(self.opp_agent_not_alive.shape[0])])
            # self.avail_act = repeat_at(self.avail_act, insert_dim=0, n_times=self.n_thread)

        if self.team == 1 and GlobalConfig.AlgorithmConfig_Blue.action_filter_enabled:
            self.avail_act = np.array([np.stack(
                tuple(self.action_converter.get_tp_avail_act(tp, self.opp_agent_not_alive[index]) for tp in
                      self.agent_type)) for index in range(self.opp_agent_not_alive.shape[0])])
        # self.avail_act = repeat_at(self.avail_act, insert_dim=0, n_times=self.n_thread)

        obs = StateRecall['Latest-Obs']
        obs = my_view(obs, [0, 0, -1, self.rawob_dim])
        # all-zero observation rows are treated as invalid and masked with NaN
        # (presumably dead/absent entities — TODO confirm against env encoding)
        obs[(obs == 0).all(-1)] = np.nan

        P = StateRecall['ENV-PAUSE']
        R = ~P
        RST = StateRecall['Env-Suffered-Reset']

        if RST.all():
            # NOTE(review): `rand < 0` is always False, so the episode-random
            # flag appears deliberately disabled here
            EpRsn = np.random.rand(self.n_thread) < 0
            StateRecall['_EpRsn_'] = EpRsn

        obs_feed = obs[R]
        # NOTE(review): '_EpRsn_' is only written on a full reset above; this
        # read relies on it having been set by an earlier step — verify
        I_StateRecall = {
            'obs': obs_feed,
            'Test-Flag': StateRecall['Test-Flag'],
            '_EpRsn_': StateRecall['_EpRsn_'][R],
            'threads_active_flag': R,
            'Latest-Team-Info': StateRecall['Latest-Team-Info'][R],
        }

        # --------------------------
        # if not StateRecall['Test-Flag']: self.train()

        P = StateRecall['ENV-PAUSE']
        R = ~P

        # 2025/07 ----- modification start
        if self.team == 0 and GlobalConfig.AlgorithmConfig_Red.action_filter_enabled:
            I_StateRecall.update({
                'avail_act': self.avail_act[R],
            })

        if self.team == 1 and GlobalConfig.AlgorithmConfig_Blue.action_filter_enabled:
            I_StateRecall.update({
                'avail_act': self.avail_act[R],
            })

        act = np.zeros(shape=(self.n_thread, self.n_agent), dtype=int) - 1  # initialise every entry to -1
        act_active, internal_recall = self.making_decision(I_StateRecall, StateRecall['Test-Flag'])
        act[R] = act_active

        # while patience lasts, assert that every chosen action was marked available
        if self.team == 0 and GlobalConfig.AlgorithmConfig_Red.action_filter_enabled and self.patience > 0:
            self.patience -= 1
            assert (gather_righthand(self.avail_act, repeat_at(act, -1, 1), check=False)[R] == 1).all()

        if self.team == 1 and GlobalConfig.AlgorithmConfig_Blue.action_filter_enabled and self.patience > 0:
            self.patience -= 1
            assert (gather_righthand(self.avail_act, repeat_at(act, -1, 1), check=False)[R] == 1).all()

        # actions_list = np.array(
        #     [[self.action_converter.convert_act_arr(self.agent_type[agentid], act) for agentid, act in enumerate(th)]
        #      for th in act])

        actions_list = [[self.action_converter.convert_act_arr(self.agent_type[agentid], act) for agentid, act in enumerate(th)] for th in act]

        # 2025/07 ----- modification end

        # register the callback that will deliver the next state, reward and done info
        if not StateRecall['Test-Flag']:
            StateRecall['_hook_'] = internal_recall['_hook_']
            assert StateRecall['_hook_'] is not None

        return actions_list, StateRecall

    def making_decision(self, StateRecall, test_mode):
        """Produce actions for the active threads; implemented by subclasses."""
        raise NotImplementedError

    def save_model(self, update_cnt, info=None):
        """Persist the model parameters; implemented by subclasses."""
        raise NotImplementedError

    def process_framedata(self, traj_framedata):
        """Finalise one frame of trajectory data and feed it to the pool."""
        # items_to_pop = ['info']  # modified 2024/08
        # for k in items_to_pop:
        #     if k in traj_framedata:
        #         traj_framedata.pop(k)
        # the team reward is shared, so duplicate it to every agent
        # NOTE(review): reads ScenarioConfig.Enable_Reward_Sharing directly,
        # without the hasattr guard used in __init__ — verify it always exists
        if self.ScenarioConfig.Enable_Reward_Sharing:
            traj_framedata['reward'] = repeat_at(traj_framedata['reward'], insert_dim=-1, n_times=self.n_agent)
        # change the name of done to be recognised (by trajectory manager)
        traj_framedata['_DONE_'] = traj_framedata.pop('done')
        traj_framedata['_TOBS_'] = traj_framedata.pop(
            'terminal_state') if 'terminal_state' in traj_framedata else None
        # mask out pause thread
        traj_framedata = self.mask_paused_env(traj_framedata)
        # self.trajectory_pool is the training-model instance created in the subclass
        self.trajectory_pool.feed_traj_framedata(traj_framedata)
        return traj_framedata

    def check_reward_type(self, AlgorithmConfig):
        """Warn when scenario reward sharing and algorithm reward handling disagree.

        NOTE(review): the asserts permit only the sharing=True /
        TakeRewardAsUnity=False combination; any other mismatch raises —
        confirm that is the intent.
        """
        if self.ScenarioConfig.Enable_Reward_Sharing != AlgorithmConfig.TakeRewardAsUnity:
            assert self.ScenarioConfig.Enable_Reward_Sharing
            assert not AlgorithmConfig.TakeRewardAsUnity
            print(
                'Warning, the scenario (Environment) provide `Enable_Reward_Sharing`, but AlgorithmConfig does not `Enable_Reward_Sharing` !')
            print(
                'If you continue, team reward will be duplicated to serve as individual rewards, wait 3s to proceed...')
            time.sleep(3)

    def mask_paused_env(self, frag):
        """Drop the rows of paused ('_SKIP_') threads from every per-thread field."""
        running = ~frag['_SKIP_']
        if running.all():
            return frag
        for key in frag:
            # only mask public, sized, per-thread entries; '_'-prefixed keys are metadata
            if not key.startswith('_') and hasattr(frag[key], '__len__') and len(frag[key]) == self.n_thread:
                frag[key] = frag[key][running]
        return frag

    '''
        Get event from hmp task runner, called when each test rotinue is complete.
    '''

    def on_notify(self, message, **kargs):
        """Save the model when the task runner signals a completed test routine."""
        self.save_model(
            self.trajectory_pool.update_cnt,
            info=str(kargs)
        )

    ''' 
        function to be called when reward is received
    '''

    def trace_update_callback(self, unfi_frag, req_hook=True):
        """Stage an unfinished fragment and optionally hand back the completion hook."""
        assert self._unfi_frag_ is None
        self._unfi_frag_ = unfi_frag    # training data produced by the RL model
        self._check_data_hash()  # check data integraty
        if req_hook:
            # leave a hook
            return self.traj_waiting_hook
        else:
            return None

    def traj_waiting_hook(self, new_frag):
        '''
            Called from <multi_team.py::deal_with_hook()> once the reward and
            the next-moment observation are ready; merges them into the staged
            fragment and forwards the completed frame.
        '''
        # do the data corruption check first, this is important!
        self._check_data_curruption()
        # finish the frame data with new data feedin
        fi_frag = self._unfi_frag_
        fi_frag.update(new_frag)
        # call upper level function to deal with frame data
        self.process_framedata(traj_framedata=fi_frag)
        # delete data reference
        self._unfi_frag_ = None

    def _no_hook(self, new_frag):
        """Placeholder hook that deliberately does nothing."""
        return

    # protect data from overwriting
    def _check_data_hash(self):
        """Record a hash of every staged item to detect later overwrites (debug only)."""
        if self.patience > 0:
            self.patience -= 1
            self.hash_db = {}
            # for debugging, to detect write protection error
            for key in self._unfi_frag_:
                item = self._unfi_frag_[key]
                if isinstance(item, dict):
                    self.hash_db[key] = {}
                    for subkey in item:
                        subitem = item[subkey]
                        self.hash_db[key][subkey] = __hash__(subitem)
                else:
                    self.hash_db[key] = __hash__(item)

    # protect data from overwriting
    def _check_data_curruption(self):
        """Re-hash the staged fragment and assert nothing changed since staging (debug only)."""
        if self.patience > 0:
            self.patience -= 1
            assert self._unfi_frag_ is not None
            assert self.hash_db is not None
            for key in self._unfi_frag_:
                item = self._unfi_frag_[key]
                if isinstance(item, dict):
                    for subkey in item:
                        subitem = item[subkey]
                        assert self.hash_db[key][subkey] == __hash__(subitem), ('Currupted data!')
                else:
                    assert self.hash_db[key] == __hash__(item), ('Currupted data!')

    # method moved from subclass to base class
    def _create_config_fly(self):
        """Create the on-the-fly command file (cmd_io.txt) if it does not exist."""

        logdir = GlobalConfig.logdir
        self.input_file_dir = '%s/cmd_io.txt' % logdir
        if not os.path.exists(self.input_file_dir):
            with open(self.input_file_dir, 'w+', encoding='utf8') as f: f.writelines(["# Write cmd at next line: ", ""])

    # method moved from subclass to base class
    def _config_on_fly(self):
        """Execute any new commands found in cmd_io.txt, then comment them out.

        NOTE(review): `exec` runs arbitrary code from the file — acceptable
        only because the file lives in the local log directory; never expose
        this path to untrusted input.
        """
        if not os.path.exists(self.input_file_dir): return

        with open(self.input_file_dir, 'r', encoding='utf8') as f:
            cmdlines = f.readlines()

        cmdlines_writeback = []
        any_change = False

        for cmdline in cmdlines:
            # keep comments and blank lines untouched; execute everything else once
            if cmdline.startswith('#') or cmdline == "\n" or cmdline == " \n":
                cmdlines_writeback.append(cmdline)
            else:
                any_change = True
                try:
                    print('[RLAgentAlgorithm.py] ------- executing: %s ------' % cmdline)
                    exec(cmdline)
                    cmdlines_writeback.append('# [execute successfully]\t' + cmdline)
                except:
                    print(traceback.format_exc())
                    cmdlines_writeback.append('# [execute failed]\t' + cmdline)

        if any_change:
            with open(self.input_file_dir, 'w+', encoding='utf8') as f:
                f.writelines(cmdlines_writeback)

    def generate_reward(self, state, env):
        """Return a zero reward vector; shape depends on whether rewards are shared."""
        print("Call team-%d algorithm reward function" % self.team)
        reward = np.zeros(shape=self.n_agent)
        if self.Enable_Reward_Sharing:  # when Enable_Reward_Sharing is true, all agents of a team share one reward
            reward = np.zeros(shape=env.n_team)

        return reward