import os, torch, shutil
from config import GlobalConfig
from Utils.tensor_ops import repeat_at, gather_righthand
from Common.AlgorithmBase import RLAlgorithmBase
import numpy as np
from collections import Counter
from Common.action_set_encoder import encode_action_as_digits


class AlgorithmConfig:
    """
        Configuration for the RL model and its training. Some of these entries
        may be overridden by GlobalConfig / ScenarioConfig from the launch (json) file.
    """
    num_heads = 8   # number of heads in the multi-head attention module
    gamma = 0.99    # discount factor
    tau = 0.95      # GAE smoothing parameter balancing immediate vs. long-term rewards (smaller favors immediate rewards, larger weighs future rewards more)
    num_training_trajectories = 3     # number of trajectories to collect before training starts (one trajectory per training round)
    use_batch_norm = True    # whether to normalize input data
    load_trained_model = False     # whether to load a previously trained model
    specific_model_path = ''   # path of the model checkpoint to load (empty = default path)

    train_epoch = 16    # number of training epochs
    num_batches = 1     # number of batches the training dataset is split into
    max_grad_norm = 0.5     # gradient-clipping norm
    learning_rate = 1e-4   # learning rate
    memory_safety_check = False   # guard against RAM/VRAM overflow (when true, split the training dataset into batches)
    reward_forwarding = False      # whether to enable reward forwarding between agents
    reward_forwarding_gamma = 0.99     # discount factor for a "dead" agent's future rewards, used when forwarding them to surviving agents

    hidden_dim = 256  # hidden-layer dimension of the networks in the model
    num_agent = 'auto load, do not change'    # number of agents in one team (set automatically in the Algorithm base class)
    action_filter_enabled = True        # whether to filter generated actions with the available-action mask
    skip_test = True      # skip-test flag


class ReinforceAgentAlgorithm(RLAlgorithmBase):
    """QMIX-based team algorithm.

    Responsibilities: build the policy/trainer/trajectory pool, select actions
    for all environment threads, shape per-step rewards from agent positions
    and server events, and save/load model checkpoints.
    """

    def __init__(self, n_agent, n_thread, space, team=None):
        super().__init__(n_agent, n_thread, space, team)

        self.trainer = None
        self.policy = None
        self.agent_list = None          # cached array of this team's agent instances
        self.n_thread = n_thread
        self.avail_act = None
        self.opp_agent_not_alive = None
        self.opp_agent_uid = None

        self.previous_distance = None

        # 2025/07: derive the model name from this file's parent folder, so
        # logs and checkpoints are grouped per algorithm directory.
        current_file_path = os.path.abspath(__file__)
        current_dir_path = os.path.dirname(current_file_path)
        parent_dir_path = os.path.dirname(current_dir_path)
        parent_folder_name = os.path.basename(parent_dir_path)
        self.model_name = parent_folder_name
        self.logdir = ('./TeamAlg/%s/TrainLogs/' % self.model_name)

    def set_algorithm_trainer(self):
        """Instantiate the QMIX policy, the trainer and the trajectory pool.

        Called after construction, once self.n_agent / self.rawob_dim /
        self.n_actions have been resolved by the base class. Optionally
        restores a previously trained model (AlgorithmConfig.load_trained_model).
        """
        # ---------------------- set up the RL model -------------------------
        from .qmix_net import QMIX
        self.policy = QMIX(num_agents=self.n_agent, state_dim=self.rawob_dim * GlobalConfig.ScenarioConfig.obs_n_entity,
                           action_space=self.n_actions, device=self.device)
        self.policy = self.policy.to(self.device)

        # initialize optimizer and trajectory (batch) manager
        from .train import Trainer
        from Common.trajectory import TrajectoryManager
        self.trainer = Trainer(self.policy, train_config=AlgorithmConfig)
        self.trajectory_pool = TrajectoryManager(
            num_env=self.n_thread, max_trajectory_data=int(GlobalConfig.ScenarioConfig.MaxStepsPerEpisode),
            train_model_func=self.trainer.train_model_on_trajectories, team=self.team)

        if AlgorithmConfig.load_trained_model:
            self.load_model()

    def making_decision(self, state, test_mode):
        """Select actions for all active environment threads.

        Also triggers training (outside test mode) and registers a callback
        hook so the trajectory frame can be completed once the reward arrives.
        Returns (action copy, state).
        """
        # make sure the previous frame's hook has been consumed
        assert ('_hook_' not in state)

        # train when enough trajectory data has been collected
        if not state['Test-Flag'] and GlobalConfig.train_mode:
            self.train()

        # unpack the observation data returned by the training environment
        observation, threads_active_flag, avail_act = \
            tuple(state[item] if item in state else None for item
                  in ('obs', 'threads_active_flag', 'avail_act'))

        assert observation is not None and len(observation) == sum(threads_active_flag)
        # the available-action mask is mandatory when action filtering is on
        if AlgorithmConfig.action_filter_enabled:
            assert avail_act is not None

        # make decision
        action = self.policy.select_action(states=observation,
                                           test_mode=(test_mode and not AlgorithmConfig.skip_test),
                                           avail_act=avail_act)
        # avail_act is stored once here (the original redundantly re-inserted it)
        traj_framefrag = {
            "_SKIP_": ~threads_active_flag,
            "avail_act": avail_act,
            "state": observation,
            "action": action,
        }
        # the reward is not known yet; leave a hook as a callback so the
        # rollout frame can be finalized later
        if not test_mode:
            state['_hook_'] = self.trace_update_callback(traj_framefrag, req_hook=True)
        return action.copy(), state

    def generate_reward(self, state, env):
        """Compute per-step rewards for this team.

        Penalizes alive team-1 agents that leave the operating area and
        rewards destroying enemy units (from server 'Destroyed' events).
        Returns a numpy array shaped (env.n_teams,) when reward sharing is
        enabled, otherwise (number of agents in this team,).
        """
        print("Call team-%d algorithm reward function" % self.team)

        # agent count per team, taken from the scenario's static agent list
        agent_property = GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list
        team_counts = Counter(item['team'] for item in agent_property)

        # (re)cache this team's agent instances at the start of an episode
        if self.agent_list is None or env.current_step == 1:
            self.agent_list = np.array(
                [agent for agent in env.agents if agent.team == self.team])

        if self.Enable_Reward_Sharing:  # one shared reward per team
            reward = np.zeros(shape=env.n_teams)
        else:                           # one independent reward per agent of this team
            reward = np.zeros(shape=team_counts[self.team])

        if self.team == 1:
            for index, agent in enumerate(self.agent_list):
                # penalize alive agents outside the operating area
                # NOTE(review): bounds are map-specific magic numbers — confirm;
                # unit type (e.g. drone scouting range) is not yet considered
                if agent.alive:
                    if 8990 > agent.location[0] or agent.location[0] > 25970 or 27720 > agent.location[1] or agent.location[1] > 37540:
                        if not self.Enable_Reward_Sharing:
                            reward[index] = reward[index] - 0.01
                        else:
                            reward[self.team] = reward[self.team] - 0.01

        events = state['dataGlobal']['events']  # events reported by the server
        for event in events:
            event_parsed = env.parse_event(event)  # handles destroy / end-of-simulation events

            if event_parsed['Event'] == 'Destroyed':  # reward for destroying an enemy unit
                damage_causer_uid = event_parsed['DamageCauser']  # UID of the attacking agent
                target_id = event_parsed['Target']  # UID of the destroyed agent

                agent_causer = env.find_agent_by_uid(damage_causer_uid)  # resolve UID to agent instance
                # resolved but currently unused; kept for future target-side shaping
                agent_target = env.find_agent_by_uid(target_id)

                if agent_causer.team == self.team:
                    tid = agent_causer.team_id
                    if not self.Enable_Reward_Sharing:
                        reward[tid] += 0.05
                    else:
                        reward[self.team] += 0.05

        return reward

    def train(self):
        """Train on pooled trajectories when ready; checkpoint every 2nd update."""
        if self.trajectory_pool.is_ready_to_train():
            update_cnt = self.trajectory_pool.train_with_trajectory_data()
            if update_cnt % 2 == 0:
                self.save_model(update_cnt)

    def save_model(self, update_count, info=None):
        """Save the policy and its optimizers, plus a suffixed backup copy.

        Triggered when:
        1. train() hits an even update count (every 2nd update)
        2. info is given, indicating an hmp command
        3. a flag file is detected, indicating a save command from a human
        """
        backup_dir = '%s/model_backups/' % self.logdir
        os.makedirs(backup_dir, exist_ok=True)  # race-safe, replaces exists() check

        pt_path = '%s/%s_model.pt' % (self.logdir, self.model_name)
        print('Saving %s model to %s' % (self.model_name, pt_path))

        # save optimizer states alongside the policy (2024/08 change)
        save_state = {'policy': self.policy.state_dict()}
        for name, optimizer in self.policy.optimizers_dict.items():
            save_state[f'{name}_state_dict'] = optimizer.state_dict()
        torch.save(save_state, pt_path)

        # build the backup suffix; ':' is illegal in filenames on some
        # platforms, so it is mapped to '-' (replace is a no-op otherwise)
        if info is None:
            info = str(update_count)
        else:
            info = '%s_%s' % (update_count, info.replace(':', '-'))

        pt_path2 = '%s/model_backups/%s_model_%s.pt' % (self.logdir, self.model_name, info)
        shutil.copyfile(pt_path, pt_path2)

        print('[Save Model]: Saving %s model finished!' % self.model_name)

    def load_model(self):
        """Restore the policy and optimizer states from a checkpoint file."""
        manual_dir = AlgorithmConfig.specific_model_path
        # NOTE(review): a non-empty specific_model_path is resolved relative to
        # self.logdir, not treated as an absolute path — confirm this is intended
        ckpt_dir = '%s/%s_model.pt' % (self.logdir, self.model_name) if manual_dir == '' else '%s/%s' % (
            self.logdir, manual_dir)
        cuda_n = 'cpu' if 'cpu' in self.device else self.device
        strict = True

        cpt = torch.load(ckpt_dir, map_location=cuda_n)
        self.policy.load_state_dict(cpt['policy'], strict=strict)

        # restore optimizer states (2024/08 change)
        for name, optimizer in self.policy.optimizers_dict.items():
            optimizer.load_state_dict(cpt[f'{name}_state_dict'])

        print('[Load Model]:', ckpt_dir)