import copy

import numpy as np

from onpolicy.config_env import get_env_config
from onpolicy.envs.mpe import rendering
from onpolicy.envs.ruler import Circle_ruler, Draw_ruler, Rectangle_ruler

parser_ev = get_env_config()


# Uses the new environment core
def make_env(env_size=None):
    """
    Build the target-motion "ruler" object that drives the enemy's trajectory.

    Parameters
    ----------
    env_size : sequence or None
        Optional explicit spec.  When non-empty it takes one of the forms
        ``["Circle", a, b]``, ``["Rectangle", a, b, c]`` or ``["Draw"]``
        (positional values are forwarded verbatim to the ruler constructors).
        When empty/None, the spec is read from the global ``parser_ev`` config
        (``env_type`` plus the matching ``Circle``/``Rectangle``/
        ``Draw_file_path`` entries).

    Returns
    -------
    A ruler instance (``Circle_ruler``, ``Rectangle_ruler`` or ``Draw_ruler``).

    Raises
    ------
    ValueError
        If the environment type is not one of "Circle", "Rectangle", "Draw".
        (Previously an unknown type surfaced as an UnboundLocalError on the
        final ``return move_ruler``.)
    """
    if env_size:
        env_type = env_size[0]
        if env_type == "Circle":
            return Circle_ruler(env_size[1], env_size[2])
        if env_type == "Rectangle":
            return Rectangle_ruler(env_size[1], env_size[2], env_size[3])
        if env_type == "Draw":
            move_ruler = Draw_ruler()
            move_ruler.load_move_list(parser_ev["Draw_file_path"])
            return move_ruler
        raise ValueError("Unknown env type: {!r}".format(env_type))

    env_type = parser_ev["env_type"]
    if env_type == "Circle":
        return Circle_ruler(parser_ev["Circle"][0], parser_ev["Circle"][1])
    if env_type == "Rectangle":
        return Rectangle_ruler(
            parser_ev["Rectangle"][0],
            parser_ev["Rectangle"][1],
            parser_ev["Rectangle"][2],
        )
    if env_type == "Draw":
        move_ruler = Draw_ruler()
        move_ruler.load_move_list(parser_ev["Draw_file_path"])
        return move_ruler
    raise ValueError("Unknown env type: {!r}".format(env_type))


class EnvCore(object):
    """
    Core multi-agent pursuit environment.

    ``agent_num`` agents chase a single moving target ("enemy") on a square
    map of side ``max_map``.  The target's trajectory is produced by a ruler
    object built via ``make_env``.  Observations are either per-agent
    (``obs_dim == 8``) or a shared global vector (``obs_dim == 16``).
    """

    def __init__(self, env_size=[], shared_viewer=True):
        # Static configuration pulled from the global env config.
        self.agent_num = parser_ev["agent_num"]
        self.enemy_num = parser_ev["enemy_num"]
        self.obs_dim = parser_ev["obs_dim"]
        self.action_dim = parser_ev["action_dim"]
        self.max_map = parser_ev["max_map"]
        self.step_count = 0
        # Random initial agent positions; kept in init_pos so reset() can
        # restore the exact same starting layout.
        self.agent_pos = np.random.uniform(0, self.max_map, size=(self.agent_num, 2))
        self.init_pos = copy.deepcopy(self.agent_pos)
        self.move_ruler = make_env(env_size)
        # Ruler coordinates appear to be origin-centered; shift them into
        # map coordinates ([0, max_map] range) — TODO confirm against ruler impl.
        self.enemy_pos = self.move_ruler.get_init_pos()
        self.enemy_pos = [
            self.enemy_pos[0] + self.max_map / 2,
            self.enemy_pos[1] + self.max_map / 2,
        ]
        # Communication mask: row i marks which of the
        # (agent_num + enemy_num) entities agent i can currently observe
        # (1 = visible, 0 = masked out by communicate_mask()).
        self.cm_mask = np.ones((self.agent_num, self.agent_num + self.enemy_num))
        # Per-agent counter of consecutive steps spent jammed ("pressed").
        self.press_time = [0] * self.agent_num
        # rendering
        self.shared_viewer = shared_viewer
        if self.shared_viewer:
            self.viewers = [None]
        else:
            self.viewers = [None] * self.agent_num
        self._reset_render()

    def reset(self):
        """
        Reset agents and target to their initial positions.

        Returns a list of ``agent_num`` observations.  With ``obs_dim == 16``
        every agent receives a deep copy of the same global observation
        (each agent's position + zeroed velocity slot, then the target's
        position + zeroed velocity slot); with ``obs_dim == 8`` each agent
        gets ``[own pos, 0, 0, target pos, 0, 0]``.
        NOTE(review): returns None for any other obs_dim — confirm callers
        only use 8 or 16.
        """
        self.agent_pos = copy.deepcopy(self.init_pos)
        self.enemy_pos = self.move_ruler.get_init_pos()
        self.enemy_pos = [
            self.enemy_pos[0] + self.max_map / 2,
            self.enemy_pos[1] + self.max_map / 2,
        ]
        obs = []
        share_obs = []
        self.step_count = 0
        self.press_time = [0] * self.agent_num
        if self.obs_dim == 16:
            # Build one flat shared observation, then hand a copy to each agent.
            for i in range(self.agent_num):
                share_obs = np.concatenate([share_obs, self.agent_pos[i], [0, 0]])
            share_obs = np.concatenate([share_obs, self.enemy_pos, [0, 0]])
            for i in range(self.agent_num):
                obs.append(copy.deepcopy(share_obs))
            return obs
        elif self.obs_dim == 8:
            for i in range(self.agent_num):
                obs.append(
                    np.concatenate([self.agent_pos[i], [0, 0], self.enemy_pos, [0, 0]])
                )
            return obs

    def get_obs(self):
        """
        Not called anywhere; deprecated (original note: "没有调用，废弃").
        Returns per-agent [own pos, target pos] without velocity slots.
        """
        obs = []
        for i in range(self.agent_num):
            obs.append(np.concatenate([self.agent_pos[i], self.enemy_pos]))
        return obs

    def communicate_mask(self, obs):
        """
        Blank out masked entities in each agent's observation.

        For every zero entry j in ``cm_mask[i]``, the 4-value slot
        ``obs[i][j*4 : j*4+4]`` is overwritten with -100.  Note that
        ``np.where`` returns a tuple of arrays, so ``index`` below is an
        index *array* and the four assignments are vectorized over all
        masked entities at once.
        """
        for i in range(self.agent_num):
            zero_indices = np.where(self.cm_mask[i] == 0)
            for index in zero_indices:
                obs[i][index * 4] = -100
                obs[i][index * 4 + 1] = -100
                obs[i][index * 4 + 2] = -100
                obs[i][index * 4 + 3] = -100
        return obs

    def EnvTranslate(self, observation):
        """
        Restore agent and enemy positions from a stored observation —
        the inverse of the layouts produced by reset()/step().
        With obs_dim == 8 each row starts with the agent's own position;
        with obs_dim == 16 agent i's position sits at slot [4i : 4i+2].
        The enemy position is read from the tail of observation[0].
        """
        for i in range(self.agent_num):
            if parser_ev["obs_dim"] == 8:
                self.agent_pos[i] = copy.deepcopy(observation[i][0:2])
            elif parser_ev["obs_dim"] == 16:
                self.agent_pos[i] = copy.deepcopy(observation[i][4 * i : 4 * i + 2])
        self.enemy_pos = copy.deepcopy(observation[0][-4:-2])

    def calculate_distance_matrix(self):
        """
        Pairwise agent-agent Euclidean distances.

        Entries that are never written (the diagonal) keep the sentinel
        ``2 * max_map``, which exceeds any achievable in-map distance, so
        ``min(distance_matrix[i])`` yields the nearest *other* agent.
        """
        distance_matrix = [
            [2 * self.max_map] * self.agent_num for _ in range(self.agent_num)
        ]

        for i in range(self.agent_num):
            for j in range(i + 1, self.agent_num):
                distance = np.sqrt(
                    (self.agent_pos[i][0] - self.agent_pos[j][0]) ** 2
                    + (self.agent_pos[i][1] - self.agent_pos[j][1]) ** 2
                )
                distance_matrix[i][j] = distance
                distance_matrix[j][i] = distance

        return distance_matrix

    def calculate_angle_matrix(self, Is_surround, Angle_point):
        """
        Pairwise angular separation (degrees) between agents that are both
        in the "surround" state (Is_surround == 1), wrapped into
        (-180, 180].  Pairs with a non-surrounding member keep 0.
        """
        angle_matrix = [[0] * self.agent_num for _ in range(self.agent_num)]

        for i in range(self.agent_num):
            for j in range(i + 1, self.agent_num):
                if Is_surround[i] == 1 and Is_surround[j] == 1:
                    angle_matrix[i][j] = Angle_point[i] - Angle_point[j]
                    if angle_matrix[i][j] > 180:
                        angle_matrix[i][j] = angle_matrix[i][j] - 360
                    elif angle_matrix[i][j] < -180:
                        angle_matrix[i][j] = angle_matrix[i][j] + 360
                    angle_matrix[j][i] = -angle_matrix[i][j]

        return angle_matrix

    def calculate_angle(self, agent1, agent2):
        """
        Angle of agent2 as seen from agent1 (agent1 as origin),
        normalized to [0, 360) degrees.
        """
        # Displacement of agent2 relative to agent1.
        dx = agent2[0] - agent1[0]
        dy = agent2[1] - agent1[1]
        # Angle in radians via arctan2 (quadrant-aware).
        angle_rad = np.arctan2(dy, dx)
        # Convert radians to degrees.
        angle_deg = np.degrees(angle_rad)
        # Wrap into [0, 360).
        angle_deg = angle_deg % 360
        return angle_deg

    def step(self, actions):
        """
        Advance the environment by one step.

        ``actions`` is a list of ``agent_num`` action vectors of shape
        (action_dim,).  actions[i][0] encodes heading and actions[i][1]
        speed — both presumably in [-1, 1] (TODO confirm action space).

        Returns ``[obs, cm_mask, rewards, dones, infos]`` where obs layout
        depends on obs_dim (8: per-agent, 16: shared + communication mask).
        """
        sub_agent_obs = []
        sub_agent_reward = []
        sub_agent_done = []
        sub_agent_info = []
        self.step_count += 1
        Angle_point = np.zeros((self.agent_num))
        Is_surround = np.zeros((self.agent_num))
        distance_matrix = self.calculate_distance_matrix()

        # Advance the target along its ruler trajectory (origin-centered →
        # map coordinates), and derive its speed/heading for the observations.
        new_pos = self.move_ruler.move()
        new_pos = [new_pos[0] + self.max_map / 2, new_pos[1] + self.max_map / 2]
        enemy_speed = np.sqrt(
            (new_pos[0] - self.enemy_pos[0]) ** 2
            + (new_pos[1] - self.enemy_pos[1]) ** 2
        )
        enemy_theta = self.calculate_angle(self.enemy_pos, new_pos)

        for i in range(self.agent_num):
            # Heading: actions[i][0] scaled to [-π, π].
            angle = actions[i][0] * np.pi

            # Magnitude: 1 + actions[i][1], i.e. [0, 2] if the action is in [-1, 1].
            magnitude = 1 + actions[i][1]

            # Move the agent.
            self.agent_pos[i][0] += magnitude * np.cos(angle)
            self.agent_pos[i][1] += magnitude * np.sin(angle)

            # (disabled) round agent coordinates to the nearest integer
            # self.agent_pos[i] = np.round(self.agent_pos[i]).astype(int)
            # Clamp to the map and penalize wall collisions (-10 per axis).
            reward_collision = 0
            if self.agent_pos[i][0] > self.max_map:
                self.agent_pos[i][0] = self.max_map
                reward_collision += -10
            elif self.agent_pos[i][0] < 0:
                self.agent_pos[i][0] = 0
                reward_collision += -10
            if self.agent_pos[i][1] > self.max_map:
                self.agent_pos[i][1] = self.max_map
                reward_collision += -10
            elif self.agent_pos[i][1] < 0:
                self.agent_pos[i][1] = 0
                reward_collision += -10

            min_distance = min(distance_matrix[i])
            # Distance from this agent to the target.
            distance_to_center = np.sqrt(
                (self.agent_pos[i][0] - self.enemy_pos[0]) ** 2
                + (self.agent_pos[i][1] - self.enemy_pos[1]) ** 2
            )
            # NOTE(review): distances are non-negative, so this branch always
            # runs and `reward` is always bound before use below.
            if min_distance >= 0:
                # distance_to_circle = abs(
                #     distance_to_center - parser_ev["Radius_detect"]
                # )
                ideal_distance = 0  # ideal distance to target; tune as needed
                reward = (
                    -np.fabs(distance_to_center - ideal_distance) + reward_collision
                )
                # (Translated note) Surround handling: when the distance drops
                # below a threshold, enter the surround routine.  With three
                # agents, if one differs from the other two in x or y and all
                # are close to the target, the surround counts as successful.
                """
                处理包围的问题，在距离小于阈值的时候考虑进入包围程序。
                包围程序：对于三个智能体的情况，有一个的横坐标或者纵坐标与其他两个不同，且距离目标都很近，则认定为包围成功。
                """
                # Angular-velocity idea (disabled): agents should match the
                # target's angular velocity.
                # reward -= np.abs(
                #     actions[i][0] * 180 + 180 - enemy_theta
                # ) / 10 + 100 * np.abs(actions[i][1] + 1 - enemy_speed)

            #     if (
            #         distance_to_center <= parser_ev["Radius_detect"] * 1.5
            #         and distance_to_center >= parser_ev["Radius_detect"] * 0.75
            #     ):
            #         Angle_point[i] = self.calculate_angle(
            #             self.enemy_pos, self.agent_pos[i]
            #         )
            #         Is_surround[i] = 1
            #         reward += 100
            #     elif distance_to_center < parser_ev["Radius_detect"] * 0.5:
            #         reward += -300
            # else:
            #     reward = -300
            # Electronic suppression (jamming): inside the pressing ring (or
            # while already pressed) and under the time limit, agent i can
            # only "hear" itself; otherwise full communication is restored.
            if self.obs_dim == 16:
                if (
                    parser_ev["Radius_pressing"][0]
                    <= distance_to_center
                    <= parser_ev["Radius_pressing"][1]
                    or 0 < self.press_time[i]
                ) and self.press_time[i] < parser_ev["time_press"]:
                    self.cm_mask[i] = (
                        [0] * i + [1] + [0] * (self.agent_num + self.enemy_num - i - 1)
                    )
                    self.press_time[i] += 1
                else:
                    self.cm_mask[i] = [1] * (self.agent_num + self.enemy_num)
                # Beyond detection range the target itself is invisible.
                if parser_ev["Radius_find"] < distance_to_center:
                    self.cm_mask[i][-1] = 0
            if self.obs_dim == 8:
                # Per-agent observation: normalized positions plus the action
                # echo and the target's heading/speed.
                sub_agent_obs.append(
                    np.concatenate(
                        [
                            [pos / self.max_map for pos in self.agent_pos[i]],
                            [actions[i][0] * 180 + 180, actions[i][1]],
                            [pos / self.max_map for pos in self.enemy_pos],
                            [enemy_theta, enemy_speed],
                        ]
                    )
                )
            sub_agent_reward.append([reward])

            # sub_agent_obs.append(np.random.random(size=(self.obs_dim,)))
            # sub_agent_reward.append([np.random.rand()])
            sub_agent_done.append(False)
            sub_agent_info.append({})
            angle_matrix = self.calculate_angle_matrix(Is_surround, Angle_point)

        # Surround-formation bonuses: reward agents whose angular separation
        # to one (or two) teammates is near ±120°; penalize crowding on one side.
        for i in range(self.agent_num):
            count_positive_angle = sum(
                1 for angle in angle_matrix[i] if 100 <= angle <= 140
            )
            count_negative_angle = sum(
                1 for angle in angle_matrix[i] if -140 <= angle <= -100
            )

            if count_positive_angle + count_negative_angle == 1:
                sub_agent_reward[i][0] += 200
            elif count_positive_angle == 1 and count_negative_angle == 1:
                sub_agent_reward[i][0] += 500
            elif count_positive_angle == 2 or count_negative_angle == 2:
                sub_agent_reward[i][0] += -100

        # Commit the target's new position.
        self.enemy_pos = copy.deepcopy(new_pos)
        if self.obs_dim == 16:
            # Shared observation: all agent positions + their raw actions,
            # then the target position + normalized heading and speed.
            sub_obs = []
            for i in range(self.agent_num):
                sub_obs = np.concatenate([sub_obs, self.agent_pos[i], actions[i]])
            sub_obs = np.concatenate(
                [
                    sub_obs,
                    self.enemy_pos,
                    [(enemy_theta - 180) / 180, enemy_speed],
                ]
            )
            for i in range(self.agent_num):
                sub_agent_obs.append(copy.deepcopy(sub_obs))
            # Apply the communication mask.
            sub_agent_obs = self.communicate_mask(sub_agent_obs)
        return [
            sub_agent_obs,
            self.cm_mask,
            sub_agent_reward,
            sub_agent_done,
            sub_agent_info,
        ]

    # reset rendering assets
    def _reset_render(self):
        self.render_geoms = None
        self.render_geoms_xform = None

    def render(self, mode="human", close=False):
        """
        Render the environment.  With a shared viewer all agents are drawn
        in one window; otherwise each agent gets its own 800x800 viewer.
        Returns the viewer's render result (an RGB array when
        mode == "rgb_array"), or a list of results per agent.
        """
        if close:
            # NOTE(review): entries in self.viewers may still be None if
            # render() was never called — viewer.close() would then raise.
            if self.viewers is not None:
                for viewer in self.viewers:
                    viewer.close()
            return

        if self.shared_viewer:
            if self.viewers[0] is None:
                self.viewers[0] = rendering.Viewer(self.max_map, self.max_map)
            self.viewers[0].draw_agents(self.agent_pos, self.enemy_pos)
            return self.viewers[0].render(return_rgb_array=mode == "rgb_array")
        else:
            results = []
            for i in range(self.agent_num):
                if self.viewers[i] is None:
                    self.viewers[i] = rendering.Viewer(800, 800)
                self.viewers[i].draw_agents([self.agent_pos[i]], self.enemy_pos)
                results.append(
                    self.viewers[i].render(return_rgb_array=mode == "rgb_array")
                )
            return results
