import numpy as np
import datetime
from pathlib import Path
from metrics import MetricLogger
from Env.environment import Environment
from MADDPG.maddpg import MADDPG
from MADDPG.buffer import MultiAgentReplayBuffer
# import visdom
import torch as T
import matplotlib
matplotlib.use("Agg")
# --- Experiment configuration ------------------------------------------------
num_users = 10          # ground users served by the UAVs
num_BSs = 1             # base stations in the scenario
num_uavs = 2            # UAV agents (one MADDPG actor each)
num_trains = 1          # trains in the scenario (forwarded to Environment)
flight_time = 30        # episode length: environment steps per flight
learning_period = 50    # run a learning update every this many env steps
max_episodes = 50000    # total training episodes
noise_type = "ou"       # exploration noise type (Ornstein-Uhlenbeck)
env = Environment('Env-1', n_users=num_users, n_uavs=num_uavs, n_BSs=num_BSs, n_Trains=num_trains,
                  flight_time=flight_time)

# One actor observation-space size per agent; all agents share the same dims.
n_agents = env.num_uavs
actor_dims = []
for i in range(n_agents):
    actor_dims.append(env.obs_space_dims)
critic_dims = num_users * 3 + num_uavs * (num_users + num_uavs + 3)  # size of the flattened global state fed to the centralized critics
batch_size = 1024
n_actions = 2 + num_users  # presumably 2 motion components + one value per user — TODO confirm against MADDPG/Environment
scenario = 'simple'
# Timestamped results directory so repeated runs never collide.
save_dir = Path('results') / datetime.datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
save_dir.mkdir(parents=True)
save_dir_render = save_dir / 'render_trajectory'
save_dir_render.mkdir(parents=True)

# T.autograd.set_detect_anomaly(True)
# device = T.device('cuda' if T.cuda.is_available() else 'cpu')

# Multi-agent learner: one decentralized actor per UAV, centralized critics
# that see the flattened global state of size `critic_dims`.
maddpg_agents = MADDPG(actor_dims, critic_dims, n_agents, n_actions, num_users, noise_type, max_episodes,
                       alpha=0.0001, beta=0.001, scenario=scenario,
                       chkpt_dir=str(save_dir) + '/tmp/maddpg/')

# Shared replay buffer storing per-agent observations alongside the global state.
memory = MultiAgentReplayBuffer(100000, critic_dims, actor_dims,
                                n_actions, n_agents, batch_size=batch_size)

logger = MetricLogger(save_dir)

# logger.record_initials(len(agent.memory), agent.batch_size, agent.exploration_rate_decay, agent.burnin, agent.learn_every, agent.sync_every)
evaluate = False  # set True to load a saved checkpoint instead of training from scratch
if evaluate:
    maddpg_agents.load_checkpoint()

score_history = []
# Start from -inf (not 0) so the first 10-episode average always establishes a
# baseline; with 0, no checkpoint is ever saved while average rewards stay
# negative (likely here, since the reward includes energy/AoI penalties).
best_score = float('-inf')

# Fix the exploration-noise scale for every agent; decay factor 1 disables decay.
for agent in maddpg_agents.agents:
    agent.normal_scalar = np.pi / 2
    agent.normal_scalar_decay = 1

learning_starting = 1

# Record this run's hyper-parameters next to its results for reproducibility.
spec_lines = [
    f"Number of Users: {num_users}\n",
    f"Number of Base Stations: {num_BSs}\n",
    f"Number of UAVs: {num_uavs}\n",
    f"Normal Scalar: {maddpg_agents.agents[0].normal_scalar}\n",
    f"Learning Period: {learning_period}\n",
]
with open(str(save_dir) + "/specs.txt", "w") as text_file:
    text_file.writelines(spec_lines)

# Toggle for live reward plotting through a Visdom server: start one with
# `python -m visdom.server`, create `viz = visdom.Visdom()`, and set up a
# line window (e.g. win='Total rewards over episodes') before training,
# then append (X=episode, Y=score) points to it each episode.
visdom_enabled = True

# NOTE(review): the agents' `normal_scalar` controls exploration strength;
# the intent is to tune it over training so exploration shrinks gradually
# without vanishing, keeping a balance between exploration and exploitation.
# Main training loop: each episode the UAVs act until the environment reports
# done, transitions are pushed to the shared buffer, and a centralized update
# runs every `learning_period` env steps once a full batch is available.
for e in range(max_episodes):
    obs = env.reset()
    done = [False] * n_agents
    score = 0
    while not any(done):
        # Per-agent actions from local observations; the episode index `e` is
        # forwarded to choose_action (presumably for noise scheduling — confirm).
        actions = maddpg_agents.choose_action(obs, e)
        state = env.get_flattened_state()
        obs_, reward, done = env.step(actions)
        env.update_performance_metrics()
        state_ = env.get_flattened_state()
        memory.store_transition(obs, state, actions, reward, obs_, state_, done)
        # Periodic centralized update once the buffer can fill one batch.
        if maddpg_agents.curr_step % learning_period == 0 and memory.mem_cntr >= batch_size:
            maddpg_agents.learn(memory)
        # Per-step metrics: mean reward across agents plus energy/AoI figures.
        logger.log_step(
            np.mean(reward),
            env.total_uav_energy,
            env.total_user_energy,
            env.avg_aoi_ue,
            env.avg_aoi_dc)
        obs = obs_
        score += np.mean(reward)
    print(f'Total rewards: {score}')
    score_history.append(score)
    # Moving average over the last 10 episodes decides whether to checkpoint.
    avg_score = np.mean(score_history[-10:])
    if not evaluate:
        if avg_score > best_score:
            maddpg_agents.save_checkpoint()
            best_score = avg_score
    logger.log_episode()

    # Snapshot the UAV trajectories every 10 episodes.
    if e % 10 == 0:
        env.render(e, save_dir_render)

    logger.record(
        episode=e,
        step=maddpg_agents.curr_step
    )
