import random

import numpy as np
from scipy.spatial import ConvexHull

from onpolicy.envs.mpe.core import Agent, Landmark, World
from onpolicy.envs.mpe.scenario import BaseScenario


class Scenario(BaseScenario):
    """Cooperative target-encirclement scenario.

    All agents share a common goal target; the reward pulls each agent toward
    its nearest target, pays a bonus for holding a ring distance of
    0.2-0.25, and pays a large bonus when the agents' convex hull encloses a
    target.  Entering within ``ele_radius`` of a target starts a per-agent
    "attack" timer (``atc_time``) during which the agent's observation of
    other entities is blacked out for ``ele_time`` steps.

    The communication channel ``c`` is unused.
    """

    def make_world(self, args):
        """Build and return the world: agents, movable targets, parameters.

        Args:
            args: namespace providing num_agents, episode_length,
                num_landmarks, ele_time, ele_radius and random_obs.
        """
        world = World()
        # set any world properties first
        world.dim_c = 2
        world.num_agents = args.num_agents  # e.g. 3
        world.world_length = args.episode_length
        num_target = args.num_landmarks
        self.eletime = args.ele_time      # blackout duration after an attack
        self.ele_radius = args.ele_radius  # radius that triggers the attack timer
        self.random_obs = args.random_obs  # observation-noise parameter
        # add agents
        world.agents = [Agent() for _ in range(world.num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = "agent %d" % i
            agent.collide = True
            agent.silent = True
            agent.size = 0.05
            agent.atc_time = 0  # steps elapsed since this agent's attack started
        # add targets — modelled as movable Agent entities, not Landmarks
        world.targets = [Agent() for _ in range(num_target)]
        for i, target in enumerate(world.targets):
            target.name = "target %d" % i
            target.collide = True
            target.movable = True
            target.size = 0.05

        # make initial conditions
        self.reset_world(world)
        return world

    def reset_world(self, world):
        """Reset colors, pick a shared goal target, randomize all states."""
        # random properties for agents
        world.assign_agent_colors()
        # random properties for targets
        world.assign_target_colors()
        # pick one target as the shared goal and mark it green
        goal = np.random.choice(world.targets)
        goal.color = np.array([0.15, 0.65, 0.15])

        for agent in world.agents:
            agent.goal_a = goal
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
            agent.atc_time = 0
        for target in world.targets:
            target.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            target.state.p_vel = np.zeros(world.dim_p)

    def benchmark_data(self, agent, world):
        """Return squared-distance data for benchmarking.

        NOTE(review): this references ``agent.adversary`` and
        ``world.landmarks``, neither of which this scenario sets up (it uses
        ``world.targets``) — it looks like a leftover from a template scenario
        and will raise AttributeError if called; confirm before relying on it.
        """
        if agent.adversary:
            # adversary: squared distance to the goal only
            return np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos))
        # good agent: squared distance to every landmark, then to the goal
        dists = [
            np.sum(np.square(agent.state.p_pos - l.state.p_pos))
            for l in world.landmarks
        ]
        dists.append(np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos)))
        return tuple(dists)

    # return all agents that are not adversaries
    def good_agents(self, world):
        return [agent for agent in world.agents if not agent.adversary]

    def reward(self, agent, world):
        """Shared team reward plus this agent's collision penalties."""
        return self.agent_reward(agent, world)

    def calculate_reward(self, world, ele_radius):
        """Team reward from agent-target distances and encirclement.

        Components:
        - minus each agent's distance to its nearest target
        - +0.5 ring bonus per agent sitting 0.2-0.25 from a target
        - +2 per target enclosed by the agents' convex hull (only checked
          when at least 3 agent pairs are closer than 0.5)
        Side effect: starts an agent's attack timer (``atc_time``) the first
        time it comes within ``ele_radius`` of a target.
        """
        rew = 0
        distance_to_target = []

        # distance shaping: pull every agent towards its nearest target
        for a in world.agents:
            min_dists = min(
                np.sqrt(np.sum(np.square(a.state.p_pos - t.state.p_pos)))
                for t in world.targets
            )
            rew -= min_dists
            distance_to_target.append(min_dists)
            if 0.2 < min_dists < 0.25:
                rew += 0.5  # ring bonus: holding the desired standoff distance
            if min_dists < ele_radius and a.atc_time == 0:
                # start this agent's attack/blackout timer (see observation)
                a.atc_time = 1

        # build proximity "edges" between agent pairs closer than 0.5
        agent_positions = np.array([a.state.p_pos for a in world.agents])
        edges = []
        for i in range(len(agent_positions)):
            for j in range(i + 1, len(agent_positions)):
                dist = np.sqrt(
                    np.sum(np.square(agent_positions[i] - agent_positions[j]))
                )
                if dist < 0.5:
                    edges.append((i, j))

        # encirclement bonus: if the agents are mutually close enough, reward
        # every target lying inside the convex hull of ALL agents.
        # NOTE(review): the hull ignores which pairs formed edges — confirm
        # that using all agent positions is intended.
        if len(edges) >= 3:
            try:
                hull = ConvexHull(agent_positions)
                hull_points = agent_positions[hull.vertices]
                for t in world.targets:
                    if self.is_point_in_polygon(t.state.p_pos, hull_points):
                        rew += 2  # large encirclement bonus
            except Exception:
                # ConvexHull raises QhullError for degenerate (e.g. collinear)
                # point sets; no encirclement bonus in that case.  A bare
                # except here previously also swallowed KeyboardInterrupt.
                pass

        return rew

    def is_point_in_polygon(self, point, polygon):
        """Return True if ``point`` lies inside the polygon vertex array."""
        from matplotlib.path import Path

        return Path(polygon).contains_point(point)

    def agent_reward(self, agent, world):
        """Team reward plus -5 for each collision this agent is part of."""
        rew = self.calculate_reward(world, self.ele_radius)
        if agent.collide:
            for a in world.agents:
                if a is agent:
                    continue
                if self.is_collision(a, agent):
                    rew -= 5
            for t in world.targets:
                if self.is_collision(t, agent):
                    rew -= 5
        return rew

    def is_collision(self, agent1, agent2):
        """Return True if the two entities overlap (distance < sum of radii)."""
        delta_pos = agent1.state.p_pos - agent2.state.p_pos
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        return dist < agent1.size + agent2.size

    def observation(self, agent, world):
        """Return the agent's observation vector.

        Layout: own velocity (2) + own position (2) + noisy relative target
        positions + noisy relative positions of the other agents.  While
        ``0 < atc_time < eletime`` the agent is "blinded": relative positions
        are replaced with zeros and the timer advances by one step.
        """
        # positions of all targets in this agent's reference frame, with noise
        entity_pos = [self._noisy_rel_pos(entity, agent) for entity in world.targets]
        # entity colors (collected but never included in the observation)
        entity_color = [entity.color for entity in world.targets]
        # noisy relative positions of all other agents
        other_pos = [
            self._noisy_rel_pos(other, agent)
            for other in world.agents
            if other is not agent
        ]

        if 0 < agent.atc_time < self.eletime:
            # blackout window: entity information is masked with zeros until
            # eletime steps have elapsed since the attack started
            agent.atc_time += 1
            return np.concatenate(
                [agent.state.p_vel]                # 2
                + [agent.state.p_pos]              # 2
                + list(np.zeros_like(entity_pos))  # 2 * num_targets
                + list(np.zeros_like(other_pos))   # 2 * (num_agents - 1)
            )
        # (dead code that followed an unconditional return here was removed)
        return np.concatenate(
            [agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos
        )

    def _noisy_rel_pos(self, entity, agent):
        """Relative position of ``entity`` w.r.t. ``agent`` with Gaussian noise.

        NOTE(review): ``np.random.normal(-random_obs, random_obs)`` uses
        ``-random_obs`` as the *mean* and ``random_obs`` as the std-dev; if
        zero-mean noise was intended this should be ``uniform`` — confirm.
        """
        noise = np.array(
            [
                np.random.normal(-self.random_obs, self.random_obs),
                np.random.normal(-self.random_obs, self.random_obs),
            ]
        )
        return entity.state.p_pos - agent.state.p_pos + noise
