import re
import numpy as np
from pyglet.libs.win32.constants import FROM_LEFT_1ST_BUTTON_PRESSED

from config import GlobalConfig
from Common.cssim_env_wrapper import CssimEnv
from Common.CssimAgentEnvBase import CssimCommonFn
from Common.AlgorithmBase import RLAlgorithmBase
from Common.DummyAlgorithmBase import DummyAlgorithmBase

class CssimAgentEnv(CssimCommonFn, CssimEnv):
    """Environment wrapper that binds per-team algorithms (and their reward
    functions) to the underlying cssim simulation.

    Team convention throughout: index 0 = red, index 1 = blue.
    """

    def __init__(self, rank, algorithms) -> None:
        """
        @param rank: worker rank, forwarded to the base environment
        @param algorithms: sequence of exactly two algorithm objects
            (index 0 = red team, index 1 = blue team)
        """
        super().__init__(rank)
        self.previous_distance = []
        self.agent_list = None
        self.team = 0
        # 2025/07: Add reward function parameters---Modify begin
        self.red_reward_fun = None
        self.blue_reward_fun = None
        self.algorithms = algorithms
        self.red_agents = None
        self.blue_agents = None
        # 2025/07: Add reward function parameters---Modify end

    def reset(self):
        """
        Reset (initialize) the environment.

        @return state, info: initial environment state and per-team unit
            info (positions, alive flags, ...). Returns None when the
            maximum number of rendered episodes has been reached.
        """
        # 2025/07: Register the two teams' reward functions---Modify begin
        assert len(self.algorithms) == 2, "Team number error!!"
        self.register_reward_function(self.algorithms[0], self.algorithms[1])
        # 2025/07: ---Modify end

        if self.max_episode and GlobalConfig.ScenarioConfig.render:
            if self.episode >= self.max_episode:
                return
        state, info = super().reset()

        # Split agents by team (0 = red, 1 = blue).
        self.red_agents = [agent for agent in self.agents if agent.team == 0]
        self.blue_agents = [agent for agent in self.agents if agent.team == 1]

        # Reset each algorithm's per-agent "best distance so far" tracker
        # (np.inf so the first measured distance always counts as progress).
        self.algorithms[0].previous_distance = np.full((len(self.red_agents),), np.inf)
        self.algorithms[1].previous_distance = np.full((len(self.blue_agents),), np.inf)

        self.algorithms[0].n_agent = len(self.red_agents)
        self.algorithms[1].n_agent = len(self.blue_agents)

        return state, info

    def step(self, act):
        """
        Single interaction step between the agents and the environment.

        @param act: actions produced by the model
        @return next_state, reward, done, info — or None when the
            underlying step fails (callers must handle the None case).
        """
        try:
            next_state, reward, done, info = super().step(act)
            return next_state, reward, done, info
        except TypeError:
            # super().step() returned None and could not be unpacked;
            # report the failure to the caller instead of crashing.
            return None
        except Exception:
            # Best-effort: any other failure is likewise reported as None.
            return None

    # 2025/07: Newly added reward-function registration
    def register_reward_function(self, red_alg, blue_alg):
        """
        Register the reward functions implemented by the red/blue
        algorithm classes with the environment.

        @param red_alg: red-team algorithm instance
        @param blue_alg: blue-team algorithm instance
        """
        assert red_alg is not None, 'The reward function for the red team is None!!!'
        assert blue_alg is not None, 'The reward function for the blue team is None!!!'

        # 2025/07: If a team uses a scripted (dummy) algorithm, no reward
        # generation function is registered for that team.
        if isinstance(blue_alg, DummyAlgorithmBase):
            self.blue_reward_fun = None
        else:
            self.blue_reward_fun = blue_alg.generate_reward

        if isinstance(red_alg, DummyAlgorithmBase):
            self.red_reward_fun = None
        else:
            self.red_reward_fun = red_alg.generate_reward

    def reward_and_done(self, state):
        """
        Compute rewards for the agents and detect episode termination.

        The config flag Enable_Reward_Sharing selects whether all agents
        of a team share one reward value (shape = n_teams) or every agent
        receives its own reward (shape = n_agents, red first, then blue).

        @param state: full state, containing per-agent and per-team info
        @return reward, WinningResult: the reward array; WinningResult
            contains the team ranking and the reason the episode ended
            (None while the episode is still running).
        """
        print('current step: %d' % state['dataGlobal']['timeCnt'])

        # Default reward: zeros with the shape expected downstream.
        # Bug fix: previously `reward` was left undefined when a team used
        # a scripted algorithm (no reward function registered), which
        # raised UnboundLocalError on the 'EndEpisode' event below.
        if self.Enable_Reward_Sharing:
            reward = np.zeros(shape=self.n_teams)
        else:
            reward = np.zeros(shape=self.n_agents)

        # 2025/07: Call the blue/red team reward generation functions.
        if self.blue_reward_fun is not None and self.red_reward_fun is not None:
            reward_blue = self.blue_reward_fun(state, self)
            print("Reward blue:", reward_blue)

            reward_red = self.red_reward_fun(state, self)
            print("Reward red:", reward_red)

            if not self.Enable_Reward_Sharing:
                # Fixed message: the first check concerns the BLUE team.
                assert reward_blue.ndim == 1 and len(reward_blue) == self.n_team_agent[1], \
                    "The length of the reward array for the blue team does not match the number of blue agents!!!"
                assert reward_red.ndim == 1 and len(reward_red) == self.n_team_agent[0], \
                    "The length of the reward array for the red team does not match the number of red agents!!!"
                reward = np.concatenate((reward_red, reward_blue))
            else:
                assert reward_blue.ndim == 1 and len(reward_blue) == self.n_teams
                assert reward_red.ndim == 1 and len(reward_red) == self.n_teams
                reward = reward_red + reward_blue
            print("Reward Red and Blue:", reward)

        WinningResult = None  # filled in when an 'EndEpisode' event arrives

        events = state['dataGlobal']['events']  # events reported by the server
        for event in events:
            # Parse the raw event; destroy and end-of-episode events are
            # the ones relevant here.
            event_parsed = self.parse_event(event)

            if event_parsed['Event'] == 'EndEpisode':
                EndReason = event_parsed['EndReason']  # why the episode ended
                WinTeam = int(event_parsed['WinTeam'])  # winning team id

                if WinTeam < 0:
                    # WinTeam < 0 (usually -1) means the step limit was hit;
                    # the team with the most surviving units wins.
                    agents_left_each_team = [0 for _ in range(self.n_teams)]
                    for a in self.agents:
                        if a.alive: agents_left_each_team[a.team] += 1
                    WinTeam = np.argmax(agents_left_each_team)

                    # Tie on survivor count: compare total remaining HP.
                    if agents_left_each_team[WinTeam] == agents_left_each_team[1 - WinTeam]:
                        hp_each_team = [0 for _ in range(self.n_teams)]
                        for a in self.agents:
                            if a.alive: hp_each_team[a.team] += a.hp
                        WinTeam = np.argmax(hp_each_team)
                        if hp_each_team[WinTeam] == hp_each_team[1 - WinTeam]:  # still tied: no winner
                            WinTeam = -1

                if WinTeam >= 0:  # some team won: reward the winner, punish the loser
                    WinningResult = {
                        "team_ranking": [0, 1] if WinTeam == 0 else [1, 0],  # ranking by team id
                        "end_reason": EndReason
                    }
                    if not self.Enable_Reward_Sharing:
                        # (-2 * WinTeam + 1) is +1 when team 0 wins, -1 when team 1 wins,
                        # so winners gain and losers lose symmetrically.
                        reward[0:self.n_team_agent[0]] = reward[0:self.n_team_agent[0]] + (-2 * WinTeam + 1)
                        reward[self.n_team_agent[0]:self.n_agents] = reward[self.n_team_agent[0]:self.n_agents] - (-2 * WinTeam + 1)
                    else:
                        reward[WinTeam] += 1  # winning team rewarded
                        reward[1 - WinTeam] -= 1  # losing team penalized
                else:  # draw: both teams are penalized
                    WinningResult = {
                        "team_ranking": [-1, -1],  # -1 ranking marks "no winner"
                        "end_reason": EndReason
                    }
                    reward -= 1

        return reward, WinningResult

