import sys
import time

import torch
import numpy as np

# Specify the model path in load_model() below.
from matplotlib import colors

from algorithms.algorithm.r_actor_critic import R_Actor
from config import get_config
from mappo_envs.env_discrete import DiscreteActionEnv
import matplotlib.pyplot as plt
import math


def parse_args(args, parser):
    """Attach scenario-specific options to *parser* and parse *args*.

    Uses ``parse_known_args`` so options the scenario does not declare are
    silently ignored rather than raising an error.

    Args:
        args: list of command-line tokens (e.g. ``sys.argv[1:]``).
        parser: an argparse-style parser, typically from ``get_config()``.

    Returns:
        The parsed argument namespace.
    """
    parser.add_argument("--scenario_name", type=str, default="MyEnv", help="Which scenario to run on")
    parser.add_argument("--num_landmarks", type=int, default=3)
    parser.add_argument("--num_agents", type=int, default=4, help="number of players")

    parsed, _unknown = parser.parse_known_args(args)
    return parsed


def load_model(args, run_dir='../results/MyEnv/A4/test1'):
    """Load a trained actor network for evaluation.

    Args:
        args: command-line tokens forwarded to ``parse_args``.
        run_dir: directory containing the saved ``actor.pt`` checkpoint.
            Fix: this parameter was previously ignored and overwritten by a
            hard-coded path; that path is now the default, so existing
            callers get identical behavior while explicit paths are honored.

    Returns:
        An ``R_Actor`` with its weights restored from ``run_dir + '/actor.pt'``.
    """
    global env
    parser = get_config()
    all_args = parse_args(args, parser)
    # NOTE(review): hard-coded CUDA device; this script will fail on a
    # CPU-only machine. Left as-is to avoid changing runtime behavior.
    device = torch.device("cuda:0")
    actor = R_Actor(all_args, env.observation_space[0], env.action_space[0], device)
    policy_actor_state_dict = torch.load(run_dir + '/actor.pt', map_location='cuda')
    actor.load_state_dict(policy_actor_state_dict)
    return actor


# Create the figure window and axes object reused by update_locations().
fig, ax = plt.subplots(figsize=(8, 8))

# Current plot limits; set_XY_boundary() may recentre these at runtime
# (its recentring logic is currently commented out).
min_X = -400
max_X = 400
min_Y = -400
max_Y = 400


def set_XY_boundary():
    """Recentre the plot limits around the escape agent.

    Currently a no-op: the recentring logic below is commented out, so the
    module-level limits (min_X/max_X/min_Y/max_Y) keep their current values.
    """
    global min_X, max_X, min_Y, max_Y
    # min_X = -500 + base_env.escape_agent.x
    # max_X = 500 + base_env.escape_agent.x
    # min_Y = -500 + base_env.escape_agent.y
    # max_Y = 500 + base_env.escape_agent.y


def update_locations(step_num):
    """Redraw the live scatter plot of all agents for the current step.

    Pursuers are drawn in red with a heading arrow; the escape agent is
    drawn in blue with its capture-distance circle. The plot window is
    recentred every 100 steps via set_XY_boundary().
    """
    global min_X, max_X, min_Y, max_Y
    # Wipe the previous frame, then draw the circular arena boundary.
    ax.clear()
    ax.add_patch(plt.Circle((0, 0), 405, color='black', linewidth=5, fill=False))

    xs, ys, colors_ = [], [], []
    for idx in range(base_env.agent_num):
        pursuer = base_env.pursuit_agents[idx]
        xs.append(pursuer.x)
        ys.append(pursuer.y)
        colors_.append('red')
        # Heading arrow: ship_heading is in degrees, measured from +Y
        # (north), hence sin for dx and cos for dy.
        rad = math.radians(pursuer.ship_heading)
        ax.arrow(pursuer.x, pursuer.y,
                 20 * math.sin(rad), 20 * math.cos(rad),
                 head_width=1, head_length=1, fc='red', ec='red')

    evader = base_env.escape_agent
    xs.append(evader.x)
    ys.append(evader.y)
    colors_.append('blue')
    ax.add_patch(plt.Circle((evader.x, evader.y), base_env.max_distance,
                            color='blue', linewidth=2, fill=False))

    # Scatter all agent positions at once.
    ax.scatter(xs, ys, c=colors_)

    # Periodically recentre the view, then apply axis cosmetics.
    if step_num % 100 == 0:
        set_XY_boundary()
    ax.set_xlim(min_X, max_X)
    ax.set_ylim(min_Y, max_Y)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_title('Agent Locations - '+str(step_num))

    # Flush the frame to screen without blocking the simulation loop.
    plt.draw()
    plt.pause(0.001)


def _t2n(x):
    return x.detach().cpu().numpy()


if __name__ == "__main__":
    global env
    env = DiscreteActionEnv()
    base_env = env.env.env
    actor = load_model(sys.argv[1:])
    mask_list= []
    for i in range(base_env.agent_num):
        mask_list.append([1.0])
    masks = np.array(mask_list)
    episode_length = 100000
    done = False
    rnn_states_actor = np.zeros((base_env.agent_num, 1, 64), dtype=float)
    base_env.set_eval_mode()
    obs = base_env.reset()
    base_env.escape_speed = 3.0

    for i in range(episode_length):
        actor_obs = np.array(obs)
        actions, action_log_probs, rnn_states_actor = actor(actor_obs, rnn_states_actor, masks)
        env_actions = []
        cnt = 0
        for action in actions:
            action = action.tolist()
            if base_env.pursuit_agents[cnt].distance_remaining < 50:
                env_actions.append([(action[0]-4) , (action[1]-4)])
            else:
                speed, heading=base_env.step_to_goal(cnt)
                env_actions.append([speed, heading])
            cnt += 1
        obs, rewards, done = base_env.step(env_actions)
        # if base_env.scenairo_step_num == 50 :
        #     obs = base_env.decreaseOneAgent()
        #     masks = masks[0:base_env.agent_num]
        #     rnn_states_actor = rnn_states_actor[0:base_env.agent_num]
        update_locations(i)
        time.sleep(0.1)
        # if i % 50 == 0:
        #     print("step_num: "+str(i))
        #     print(base_env.rewards)
        #     print(base_env.neighbor_pursuit_degree)
        #     for agent in base_env.pursuit_agents:
        #         agent.print_info()
        #     print("")

        if base_env.succeed():
            print("成功形成围捕")
            for agent in base_env.pursuit_agents:
                print(agent.distance_remaining)
            if base_env.num_success >= 10:
            # time.sleep(20)
                obs = base_env.reset()
            set_XY_boundary()
        if done:
            obs = base_env.reset()
            set_XY_boundary()

