import argparse
import math
import os

import numpy as np
import torch
from tensorboardX import SummaryWriter

import SAC
import env
import utils


def plot_trajectory(env, episode, altitude=40):
    """Render and save a 3D plot of the UAV trajectories for one episode.

    Args:
        env: environment exposing ``area_size``, ``device_pos``, ``PR_pos``,
            ``PT_pos``, ``U`` and ``trajectory`` (a dict mapping UAV index to
            a list of 2D or 3D positions).
        episode: identifier embedded in the output filename.
        altitude: z-coordinate used for the top face of the reference cube
            and assumed for 2D trajectory points (default 40, matching the
            previously hard-coded value).

    The figure is written to ``3d_trajectories/episode_<episode>.png``.
    """
    import matplotlib.pyplot as plt  # lazy import: only needed when plotting
    from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection on older matplotlib
    import os

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection='3d')
    area_size = env.area_size

    def plot_cube():
        """Draw the dashed wireframe cube outlining the operating area."""
        # bottom face
        ax.plot([0, area_size], [0, 0], 0, 'k--', alpha=0.3)
        ax.plot([0, area_size], [area_size, area_size], 0, 'k--', alpha=0.3)
        ax.plot([0, 0], [0, area_size], 0, 'k--', alpha=0.3)
        ax.plot([area_size, area_size], [0, area_size], 0, 'k--', alpha=0.3)

        # top face
        ax.plot([0, area_size], [0, 0], altitude, 'k--', alpha=0.3)
        ax.plot([0, area_size], [area_size, area_size], altitude, 'k--', alpha=0.3)
        ax.plot([0, 0], [0, area_size], altitude, 'k--', alpha=0.3)
        ax.plot([area_size, area_size], [0, area_size], altitude, 'k--', alpha=0.3)

        # vertical edges
        for x in [0, area_size]:
            for y in [0, area_size]:
                ax.plot([x, x], [y, y], [0, altitude], 'k--', alpha=0.3)

    plot_cube()

    # Ground wireless devices (only the first one gets a legend entry).
    for i, dev in enumerate(env.device_pos):
        ax.scatter(dev[0], dev[1], 0,
                   c='green', marker='^', s=80, depthshade=True,
                   label='Device' if i == 0 else '')

    # Primary network nodes (receiver and transmitter).
    pr = env.PR_pos
    pt = env.PT_pos
    ax.scatter(pr[0], pr[1], 0, c='red', marker='s', s=120, label='PR')
    ax.scatter(pt[0], pt[1], 0, c='blue', marker='o', s=120, label='PT')
    ax.text(pt[0], pt[1], 0, '', fontsize=9, zorder=10)
    ax.text(pr[0], pr[1], 0, '', fontsize=9, zorder=10)

    # UAV trajectories.
    # NOTE(review): only two colors/markers are defined — assumes env.U <= 2.
    colors = ['#FF6F00', '#6A1B9A']
    markers = ['*', 'X']

    for u in range(env.U):
        traj = np.array(env.trajectory[u])

        # 2D trajectories are lifted to the flight altitude.
        if traj.shape[1] == 2:
            z = np.full(len(traj), altitude)
            traj = np.hstack((traj, z.reshape(-1, 1)))

        # trajectory line
        ax.plot(traj[:, 0], traj[:, 1], traj[:, 2],
                color=colors[u], linewidth=3,
                label=f'Trajectory of UAV{u + 1} ')

        # start / end markers
        ax.scatter(traj[0, 0], traj[0, 1], traj[0, 2],
                   color=colors[u], marker=markers[0],
                   s=200, edgecolor='black', label=f'Start point of UAV{u + 1} ')
        ax.scatter(traj[-1, 0], traj[-1, 1], traj[-1, 2],
                   color=colors[u], marker=markers[1],
                   s=200, edgecolor='black', label='')

    # Axes, limits, and view configuration.
    ax.set_xlabel('X (m)', labelpad=15, fontsize=22)
    ax.set_ylabel('Y (m)', labelpad=15, fontsize=22)
    ax.set_zlabel('Z (m)', labelpad=15, fontsize=22)
    ax.tick_params(axis='x', labelsize=22)
    ax.tick_params(axis='y', labelsize=22)
    ax.tick_params(axis='z', labelsize=22)
    ax.set_xlim(-50, area_size + 50)
    ax.set_ylim(-50, area_size + 50)
    ax.set_zlim(0, 80)
    ax.view_init(elev=25, azim=-45)
    ax.grid(True, alpha=0.5)

    ax.legend(loc='upper left', bbox_to_anchor=(1.05, 1), borderaxespad=0., fontsize=22)

    # Save the figure to disk.
    os.makedirs("3d_trajectories", exist_ok=True)
    plt.savefig(f"3d_trajectories/episode_{episode}.png",
                dpi=300, bbox_inches='tight')
    plt.close()
if __name__ == "__main__":
    writer = SummaryWriter("SAC_network")
    parser = argparse.ArgumentParser()

    # Experiment setup
    parser.add_argument("--policy", default="SAC", help='Algorithm (default: SAC)')
    parser.add_argument("--seed", default=0, type=int, help='Seed number for PyTorch and NumPy (default: 0)')
    parser.add_argument("--gpu", default=0, type=int, help='GPU ordinal for multi-GPU computers (default: 0)')
    parser.add_argument("--start_time_steps", default=50000, type=int, metavar='N', help='Number of exploration time steps sampling random actions (default: 50000)')
    parser.add_argument("--buffer_size", default=1000000, type=int, help='Size of the experience replay buffer (default: 1000000)')
    # BUG FIX: batch_size had no type=int, so a CLI-supplied value arrived as str.
    parser.add_argument("--batch_size", default=128, type=int, metavar='N', help='Batch size (default: 128)')
    parser.add_argument("--save_model", action="store_true", help='Save model and optimizer parameters')
    parser.add_argument("--load_model", default="", help='Model load file name; if empty, does not load')
    parser.add_argument("--save_dir", default="./saved_models", type=str, help='Directory to save models')
    parser.add_argument("--test_only", action="store_true", help='Run in test mode without training')
    parser.add_argument("--test_model", default="", type=str, help='Model to load for testing')

    # Hyper-parameters.
    # BUG FIX: exploration_noise and discount had no type=float, so CLI-supplied
    # values arrived as str and would break downstream arithmetic (discount feeds gamma).
    parser.add_argument("--exploration_noise", default=0.2, type=float, metavar='G', help='Std of Gaussian exploration noise')
    parser.add_argument("--discount", default=0.99, type=float, metavar='G', help='Discount factor for reward (default: 0.99)')
    parser.add_argument("--tau", default=1e-3, type=float, metavar='G',  help='Learning rate in soft/hard updates of the target networks (default: 0.001)')
    parser.add_argument("--lr", default=3e-4, type=float, metavar='G', help='Learning rate for the networks (default: 0.0003)')
    parser.add_argument("--decay", default=1e-5, type=float, metavar='G', help='Decay rate for the networks (default: 0.00001)')
    parser.add_argument("--alpha", type=float, default=0.2, help="Temperature parameter")
    parser.add_argument("--lr_alpha", type=float, default=3e-4, help="Learning rate for alpha")

    args = parser.parse_args()

    # Ensure the model output directory exists.
    os.makedirs(args.save_dir, exist_ok=True)

    # Instantiate the environment (the name `env` shadows the module from here on).
    env = env.CognitiveUAVEnv()

    # BUG FIX: --seed was parsed but never applied; seed both RNG sources.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    T = env.T  # task horizon: steps per episode

    # BUG FIX: the device was hard-coded to cuda:0; honour the --gpu argument.
    device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")

    # Environment constants forwarded to the agent.
    U = env.U          # number of UAVs
    N = env.N          # env-defined count (presumably devices/subchannels — confirm in env module)
    P_max = env.P_max  # maximum transmit power

    # SAC configuration bundle.
    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "U": U,
        "N": N,
        "P_max": P_max,
        "max_action": max_action,
        "device": device,
        "gamma": args.discount,
        "tau": args.tau,
        "alpha": args.alpha,
        "lr": args.lr,
        "lr_alpha": args.lr_alpha
    }

    agent = SAC.SAC(**kwargs)

    # 加载模型
    if args.load_model:
        agent.load(args.load_model)
        print(f"Loaded model from {args.load_model}")

    #  测试模式
    if args.test_only:
        if not args.test_model:
            print("Error: Test model not specified! Use --test_model to specify model file")
            exit(1)

        # 加载测试模型
        agent.load(args.test_model)
        print(f"Loaded test model from {args.test_model}")

        # 运行测试
        for test_eps in range(10):
            state = env.reset()
            episode_reward = 0
            episode_Rate = 0
            episode_Interference = 0
            episode_Energy = 0

            # 重置轨迹记录
            env.trajectory = {u: [] for u in range(env.U)}
            for u in range(env.U):
                env.trajectory[u].append(env.uav_pos[u].copy())

            for t in range(T):
                action = agent.select_action(np.array(state))
                next_state, reward, done, info = env.step(action, test_eps, t)

                state = next_state
                episode_reward += reward
                episode_Rate += np.sum(info["rates"])
                episode_Interference += info["interference"]
                episode_Energy += np.sum(info["energy_used"])

                if done:
                    break

            # 绘制轨迹
            plot_trajectory(env, f"test_{test_eps}")

            print("Test Episode:", test_eps,
                  "Reward:", f"{episode_reward / T:.2f}",
                  "Rate:", f"{episode_Rate / T / 1e6:.2f} Mbps",
                  "Interference:", f"{episode_Interference / T:.2f} dBW",
                  "Energy:", f"{episode_Energy / T:.1f} J")

        exit(0)


    # ---- Training mode ----
    replay_buffer = utils.ReplayBuffer(state_dim, action_dim, max_size=args.buffer_size)

    # Best undiscounted episode return seen so far (for best-model checkpointing).
    # NOTE: dead locals instant_rewards / start_epsilon / end_epsilon / max_reward removed.
    best_reward = -float('inf')

    for eps in range(15001):
        state = env.reset()
        episode_reward = 0
        episode_Rate = 0
        episode_Interference = 0
        episode_Energy = 0

        # Reset the trajectory log, seeded with each UAV's starting position.
        env.trajectory = {u: [] for u in range(env.U)}
        for u in range(env.U):
            env.trajectory[u].append(env.uav_pos[u].copy())

        for t in range(T):
            action = agent.select_action(np.array(state))
            next_state, reward, done, info = env.step(action, eps, t)

            # Fixed-horizon episodes: mark terminal only at the last step,
            # overriding whatever done flag the environment reported.
            done = 1 if t == T - 1 else 0

            # Store the transition in the experience replay buffer.
            replay_buffer.add(state, action, next_state, reward, done)

            state = next_state
            episode_reward += reward
            episode_Rate += np.sum(info["rates"])
            episode_Interference += info["interference"]
            episode_Energy += np.sum(info["energy_used"])

            # One gradient update per environment step.
            agent.update_parameters(replay_buffer, args.batch_size)

        # Periodic checkpoint and trajectory plot.
        if eps % 1000 == 0:
            model_path = f"{args.save_dir}/model_{eps}.pth"
            agent.save(model_path)
            print(f"Saved model at episode {eps} to {model_path}")

            plot_trajectory(env, eps)

        # Keep the best-performing model in a separate file.
        if episode_reward > best_reward:
            best_reward = episode_reward
            best_model_path = f"{args.save_dir}/best_model.pth"
            agent.save(best_model_path)
            print(f"Saved best model with reward {best_reward:.2f} to {best_model_path}")

        # TensorBoard metrics (per-step averages over the T-step horizon).
        writer.add_scalar("Reward", episode_reward / T, eps)
        writer.add_scalar("Rate (Mbps)", episode_Rate / T / 1e6, eps)
        writer.add_scalar("Interference (dBW)", episode_Interference / T, eps)
        writer.add_scalar("Energy (J)", episode_Energy / T, eps)

        print("Training Episode:", eps,
              "Reward:", f"{episode_reward / T:.2f}",
              "Rate:", f"{episode_Rate / T / 1e6:.2f} Mbps",
              "Interference:", f"{episode_Interference / T:.2f} dBW",
              "Energy:", f"{episode_Energy / T:.1f} J")