import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import ray
import os
import numpy as np
import random

from model import PolicyNet, QNet
from runner import RLRunner
from test_worker import TestWorker
from worker import Worker
from parameter import *
import warnings

warnings.filterwarnings('ignore')

# os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# ray.init(num_gpus=1)
# Start Ray for distributed rollout collection (CPU-only; see commented GPU init above).
ray.init()
print("Welcome to RL autonomous exploration!")

# TensorBoard writer plus output directories for checkpoints and episode gifs.
writer = SummaryWriter(train_path)
# exist_ok avoids the check-then-create race of the original exists()/makedirs() pair.
os.makedirs(model_path, exist_ok=True)
os.makedirs(gifs_path, exist_ok=True)

# Record the (reverse-sorted) training map list to log.txt for reproducibility.
maplist = os.listdir('DungeonMaps/train')
maplist.sort(reverse=True)
with open('./log.txt', 'w') as f:
    f.writelines(name + '\n' for name in maplist)

def writeToTensorBoard(writer, tensorboardData, curr_episode):
    """Average buffered per-episode metrics and log each one to TensorBoard.

    Each row of ``tensorboardData`` is one episode; each column a metric.
    NaN entries are ignored when averaging across episodes.
    """
    (reward, value, policyLoss, qValueLoss, entropy, policyGradNorm,
     qValueGradNorm, log_alpha, alphaLoss, ceLoss, success_rate,
     total_steps) = np.nanmean(np.array(tensorboardData), axis=0)

    # (tag, value) pairs in the same order the scalars were originally written.
    scalars = (
        ('Losses/Value', value),
        ('Losses/Policy Loss', policyLoss),
        ('Losses/Alpha Loss', alphaLoss),
        ('Losses/Q Value Loss', qValueLoss),
        ('Losses/Entropy', entropy),
        ('Losses/Policy Grad Norm', policyGradNorm),
        ('Losses/Q Value Grad Norm', qValueGradNorm),
        ('Losses/Log Alpha', log_alpha),
        ('Losses/CE', ceLoss),
        ('Perf/Reward', reward),
        ('Perf/Success Rate', success_rate),
        ('Perf/Total Steps', total_steps),
    )
    for tag, scalar in scalars:
        writer.add_scalar(tag=tag, scalar_value=scalar, global_step=curr_episode)


def run_test(curr_episode):
    """Evaluate the latest checkpoint on one held-out map and one training map.

    Builds fresh PolicyNet/QNet instances, restores them from
    ``{model_path}/checkpoint.pth``, then runs one TestWorker episode in test
    mode and one in train mode, printing the utility metrics of each.

    Returns:
        tuple: (test_worst_u, test_avg_u, test_heu_u, worst_u, avg_u, heu_u)
    """
    # Save rollout gifs only every 100th episode.
    save_img = curr_episode % 100 == 0
    device = torch.device('cuda') if USE_GPU else torch.device('cpu')
    global_network = PolicyNet(INPUT_DIM, EMBEDDING_DIM).to(device)
    global_q_net = QNet(INPUT_DIM, EMBEDDING_DIM)

    # BUG FIX: the original compared a torch.device object against the string
    # 'cuda' (never equal), so the GPU load branch was unreachable and the
    # checkpoint was always loaded with the CPU map_location. Passing
    # map_location=device handles both the CPU and GPU cases correctly.
    checkpoint = torch.load(f'{model_path}/checkpoint.pth', map_location=device)

    global_network.load_state_dict(checkpoint['policy_model'])
    global_q_net.load_state_dict(checkpoint['q_net1_model'])

    # Held-out evaluation episode; global_step presumably cycles through 10
    # test maps — TODO confirm against TestWorker.
    test_worker = TestWorker(meta_agent_id=None, policy_net=global_network, global_step=curr_episode % 10, test=True, device=device, save_image=save_img, greedy=False)
    test_worker.work(curr_episode)
    test_perf_metrics = test_worker.perf_metrics
    test_worst_u = test_perf_metrics['evader_shortest_worst_u']
    test_avg_u = test_perf_metrics['evader_shortest_avg_u']
    test_heu_u = test_perf_metrics['heuristic_u']

    print('test_worst_u: ', test_worst_u, 'test_avg_u: ', test_avg_u, 'test_heuristic_u: ', test_heu_u)

    # Same evaluation on a training map; presumably 76 training maps — TODO confirm.
    worker = TestWorker(meta_agent_id=None, policy_net=global_network, global_step=curr_episode % 76, test=False, device=device, save_image=save_img, greedy=False)
    worker.work(curr_episode)
    perf_metrics = worker.perf_metrics
    worst_u = perf_metrics['evader_shortest_worst_u']
    avg_u = perf_metrics['evader_shortest_avg_u']
    heu_u = perf_metrics['heuristic_u']

    print('worst_u: ', worst_u, 'avg_u: ', avg_u, 'heu_u: ', heu_u)

    return test_worst_u, test_avg_u, test_heu_u, worst_u, avg_u, heu_u

def write_test(writer, test_worst_u_list, test_avg_u_list, test_heu_u_list, train_worst_u_list, train_avg_u_list, train_heu_u_list, curr_episode):
    """Log the mean of each buffered evaluation-utility list to TensorBoard."""
    tagged_lists = (
        ('Test/Worst Utility', test_worst_u_list),
        ('Test/Avg Utility', test_avg_u_list),
        ('Test/Heuristic Utility', test_heu_u_list),
        ('Train/Worst Utility', train_worst_u_list),
        ('Train/Avg Utility', train_avg_u_list),
        ('Train/Heuristic Utility', train_heu_u_list),
    )
    for tag, values in tagged_lists:
        writer.add_scalar(tag=tag, scalar_value=np.array(values).mean(), global_step=curr_episode)

def main():
    """Driver process for SAC training.

    Repeatedly: (1) collects episode rollouts from Ray meta-agents,
    (2) trains the joint policy, twin Q networks and temperature alpha from
    a replay buffer, (3) logs metrics to TensorBoard, (4) checkpoints, and
    (5) runs evaluation episodes. Loops forever until CTRL-C.
    """
    # use GPU/CPU for driver/worker
    device = torch.device('cuda') if USE_GPU_GLOBAL else torch.device('cpu')
    local_device = torch.device('cuda') if USE_GPU else torch.device('cpu')
    
    # initialize neural networks
    global_policy_net = PolicyNet(INPUT_DIM, EMBEDDING_DIM).to(device)
    global_q_net1 = QNet(INPUT_DIM, EMBEDDING_DIM).to(device)
    global_q_net2 = QNet(INPUT_DIM, EMBEDDING_DIM).to(device)
    # SAC temperature, optimized in log space.
    log_alpha = torch.FloatTensor([-2]).to(device)  # not trainable when loaded from checkpoint, manually tune it for now
    log_alpha.requires_grad = True
    # log_beta = torch.FloatTensor([-1.6]).to(device)  # not trainable when loaded from checkpoint, manually tune it for now
    # log_beta.requires_grad = True

    # Target Q networks for the bootstrapped value estimate; synced from the
    # online nets below and again every 64 training steps.
    global_target_q_net1 = QNet(INPUT_DIM, EMBEDDING_DIM).to(device)
    global_target_q_net2 = QNet(INPUT_DIM, EMBEDDING_DIM).to(device)
    
    # initialize optimizers
    global_policy_optimizer = optim.Adam(global_policy_net.parameters(), lr=LR)
    global_q_net1_optimizer = optim.Adam(global_q_net1.parameters(), lr=LR)
    global_q_net2_optimizer = optim.Adam(global_q_net2.parameters(), lr=LR)
    log_alpha_optimizer = optim.Adam([log_alpha], lr=1e-4)
    # log_beta_optimizer = optim.Adam([log_beta], lr=1e-4)

    # initialize decay (not use)
    policy_lr_decay = optim.lr_scheduler.StepLR(global_policy_optimizer, step_size=DECAY_STEP, gamma=0.96)
    q_net1_lr_decay = optim.lr_scheduler.StepLR(global_q_net1_optimizer,step_size=DECAY_STEP, gamma=0.96)
    q_net2_lr_decay = optim.lr_scheduler.StepLR(global_q_net2_optimizer,step_size=DECAY_STEP, gamma=0.96)
    log_alpha_lr_decay = optim.lr_scheduler.StepLR(log_alpha_optimizer, step_size=DECAY_STEP, gamma=0.96)
    # log_beta_lr_decay = optim.lr_scheduler.StepLR(log_beta_optimizer, step_size=DECAY_STEP, gamma=0.96)
    
    # target entropy for SAC, scaled for the joint action space of
    # K_SIZE ** N_ROBOTS discrete joint actions
    entropy_target = 0.05 * (-np.log(1 / K_SIZE ** N_ROBOTS))
    # entropy_target = 0.01 * (-np.log(1 / K_SIZE))

    curr_episode = 0
    target_q_update_counter = 1

    # load model and optimizer trained before
    if LOAD_MODEL:
        print('Loading Model...')
        checkpoint = torch.load(model_path + '/checkpoint.pth')
        global_policy_net.load_state_dict(checkpoint['policy_model'])
        global_q_net1.load_state_dict(checkpoint['q_net1_model'])
        global_q_net2.load_state_dict(checkpoint['q_net2_model'])
        log_alpha = checkpoint['log_alpha']  # not trainable when loaded from checkpoint, manually tune it for now
        global_policy_optimizer.load_state_dict(checkpoint['policy_optimizer'])
        global_q_net1_optimizer.load_state_dict(checkpoint['q_net1_optimizer'])
        global_q_net2_optimizer.load_state_dict(checkpoint['q_net2_optimizer'])
        log_alpha_optimizer.load_state_dict(checkpoint['log_alpha_optimizer'])
        # log_beta_optimizer.load_state_dict(checkpoint['log_beta_optimizer'])
        policy_lr_decay.load_state_dict(checkpoint['policy_lr_decay'])
        q_net1_lr_decay.load_state_dict(checkpoint['q_net1_lr_decay'])
        q_net2_lr_decay.load_state_dict(checkpoint['q_net2_lr_decay'])
        log_alpha_lr_decay.load_state_dict(checkpoint['log_alpha_lr_decay'])
        # log_beta_lr_decay.load_state_dict(checkpoint['log_beta_lr_decay'])
        curr_episode = checkpoint['episode']

        print("curr_episode set to ", curr_episode)
        print(log_alpha)
        print(global_policy_optimizer.state_dict()['param_groups'][0]['lr'])

    # sync target nets with the (possibly restored) online Q nets
    global_target_q_net1.load_state_dict(global_q_net1.state_dict())
    global_target_q_net2.load_state_dict(global_q_net2.state_dict())
    global_target_q_net1.eval()
    global_target_q_net2.eval()

    # launch meta agents
    meta_agents = [RLRunner.remote(i) for i in range(NUM_META_AGENT)]

    # get global networks weights (moved to the worker device so remote
    # rollout workers can load them directly)
    weights_set = []
    if device != local_device:
        policy_weights = global_policy_net.to(local_device).state_dict()
        q_net1_weights = global_q_net1.to(local_device).state_dict()
        global_policy_net.to(device)
        global_q_net1.to(device)
    else:
        policy_weights = global_policy_net.to(local_device).state_dict()
        q_net1_weights = global_q_net1.to(local_device).state_dict()
    weights_set.append(policy_weights)
    weights_set.append(q_net1_weights)

    # distributed training if multiple GPUs available
    dp_policy = nn.DataParallel(global_policy_net)
    dp_q_net1 = nn.DataParallel(global_q_net1)
    dp_q_net2 = nn.DataParallel(global_q_net2)
    dp_target_q_net1 = nn.DataParallel(global_target_q_net1)
    dp_target_q_net2 = nn.DataParallel(global_target_q_net2)

    # launch the first job on each runner
    job_list = []
    for i, meta_agent in enumerate(meta_agents):
        curr_episode += 1
        job_list.append(meta_agent.job.remote(weights_set, curr_episode))
    
    # initialize metric collector
    metric_name = ['success_rate', 'total_steps']
    training_data = []
    # test_steps_list = []
    # test_success_list = []
    test_worst_u_list = []
    test_avg_u_list = []
    test_heu_u_list = []
    train_worst_u_list = []
    train_avg_u_list = []
    train_heu_u_list = []

    len_train = 0
    perf_metrics = {}
    for n in metric_name:
        perf_metrics[n] = []

    # initialize training replay buffer: 16 parallel lists, one per rollout
    # field (inputs, masks, action, reward, done, next-state fields, reference
    # policy), indexed in lockstep
    experience_buffer = []
    for i in range(16):
        experience_buffer.append([])
    
    # collect data from worker and do training
    try:
        maxreward = 0
        while True:
            # wait for any job to be completed
            done_id, job_list = ray.wait(job_list)
            # get the results
            done_jobs = ray.get(done_id)
            
            # save experience and metric
            for job in done_jobs:
                job_results, metrics, info = job
                for i in range(len(experience_buffer)):
                    experience_buffer[i] += job_results[i]
                for n in metric_name:
                    perf_metrics[n].append(metrics[n])

            # launch new task on the runner that just finished
            # (info is from the last finished job of the loop above)
            curr_episode += 1
            job_list.append(meta_agents[info['id']].job.remote(weights_set, curr_episode))
            
            # start training
            newmaxreward = 0
            # NOTE(review): `curr_episode % 1 == 0` is always true; training is
            # gated only by the buffer-size condition.
            if curr_episode % 1 == 0 and len(experience_buffer[0]) >= MINIMUM_BUFFER_SIZE:
                print("training")

                # keep the replay buffer size (drop oldest entries)
                if len(experience_buffer[0]) >= REPLAY_SIZE:
                    for i in range(len(experience_buffer)):
                        experience_buffer[i] = experience_buffer[i][-REPLAY_SIZE:]

                indices = range(len(experience_buffer[0]))

                # training for n times each step
                for j in range(8):
                    # randomly sample a batch data
                    sample_indices = random.sample(indices, BATCH_SIZE)
                    rollouts = []
                    for i in range(len(experience_buffer)):
                        # print('buffer: ', i, len(experience_buffer[0]), len(experience_buffer[i]))
                        # assert len(experience_buffer[i]) >= len(experience_buffer[0]), 'error'
                        rollouts.append([experience_buffer[i][index] for index in sample_indices])

                    # stack batch data to tensors
                    node_inputs_batch = torch.stack(rollouts[0]).to(device)
                    all_edge_inputs_batch = torch.stack(rollouts[1]).to(device)
                    all_current_inputs_batch = torch.stack(rollouts[2]).to(device)
                    node_padding_mask_batch = torch.stack(rollouts[3]).to(device)
                    all_edge_padding_mask_batch = torch.stack(rollouts[4]).to(device)
                    edge_mask_batch = torch.stack(rollouts[5]).to(device)
                    action_batch = torch.stack(rollouts[6]).to(device)
                    reward_batch = torch.stack(rollouts[7]).to(device)
                    done_batch = torch.stack(rollouts[8]).to(device)
                    next_node_inputs_batch = torch.stack(rollouts[9]).to(device)
                    next_all_edge_inputs_batch = torch.stack(rollouts[10]).to(device)
                    next_all_current_inputs_batch = torch.stack(rollouts[11]).to(device)
                    next_node_padding_mask_batch = torch.stack(rollouts[12]).to(device)
                    next_all_edge_padding_mask_batch = torch.stack(rollouts[13]).to(device)
                    next_edge_mask_batch = torch.stack(rollouts[14]).to(device)
                    reference_policy_batch = torch.stack(rollouts[15]).to(device)
                    # print('reference_policy_batch: ', reference_policy_batch.shape)
                    q_net_node_inputs_batch = node_inputs_batch
                    q_net_next_node_inputs_batch = next_node_inputs_batch

                    # Encode the per-robot actions into a single joint-action
                    # index in base K_SIZE (robot j contributes action_j * K_SIZE**j).
                    # NOTE(review): the inner `j` temporarily shadows the outer
                    # training-iteration `j`; harmless since the outer `for`
                    # reassigns it each pass, but worth renaming.
                    action_list = []
                    rp_list = []
                    for i in range(BATCH_SIZE):
                        action = 0
                        rp = 0
                        for j in range(N_ROBOTS):
                            action += action_batch[i, j] * K_SIZE ** j
                            rp += reference_policy_batch[i, j] * K_SIZE ** j
                        action_list.append(action)
                        rp_list.append(rp)

                    # action_batch = action_batch.unsqueeze(1)
                    action_batch = torch.tensor(action_list).unsqueeze(1).unsqueeze(1).to(device) # (batch_size, 1, 1)
                    reference_policy_batch = torch.tensor(rp_list).unsqueeze(1).unsqueeze(1).to(device) # (batch_size, 1, 1)
                    
                    # SAC: joint-action Q values from the twin critics (clipped double-Q)
                    with torch.no_grad():
                        # print(all_edge_inputs_batch.shape, all_current_index_batch.shape)
                        q_values1, _ = dp_q_net1(q_net_node_inputs_batch, all_edge_inputs_batch, all_current_inputs_batch, node_padding_mask_batch, all_edge_padding_mask_batch, edge_mask_batch)
                        q_values2, _ = dp_q_net2(q_net_node_inputs_batch, all_edge_inputs_batch, all_current_inputs_batch, node_padding_mask_batch, all_edge_padding_mask_batch, edge_mask_batch)
                        q_values = torch.min(q_values1, q_values2)

                    # Per-robot log-policies; each robot i gets its own K_SIZE
                    # slice of the edge inputs/masks.
                    logps_list = []
                    policy_loss = 0
                    for i in range(N_ROBOTS):
                        # print(all_edge_inputs_batch[:, :, i*K_SIZE:(i+1)*K_SIZE].shape, all_current_inputs_batch[:, :, i].shape, all_edge_padding_mask_batch[:, :, i*K_SIZE:(i+1)*K_SIZE].shape)
                        logps_list.append(dp_policy(node_inputs_batch, all_edge_inputs_batch[:, :, i*K_SIZE:(i+1)*K_SIZE], all_current_inputs_batch[:, :, i].unsqueeze(2), node_padding_mask_batch, all_edge_padding_mask_batch[:, :, i*K_SIZE:(i+1)*K_SIZE], edge_mask_batch))
                    logps = torch.stack(logps_list, dim=0)
                    
                    logp = torch.zeros_like(q_values).to(device)
                    # -----------------------------------------------------------------------------------
                    # 3v1
                    # for i in range(K_SIZE):
                    #     for j in range(K_SIZE):
                    #         for k in range(K_SIZE):
                    #             logp[:, i*1 + j*K_SIZE + k*K_SIZE**2, 0] = logps[0, :, i] + logps[1, :, j] + logps[2, :, k]
                    # 2v1
                    # for i in range(K_SIZE):
                    #     for j in range(K_SIZE):
                    #         logp[:, i*1 + j*K_SIZE, 0] = logps[0, :, i] + logps[1, :, j]
                    # -----------------------------------------------------------------------------------
                    # compute logp via broadcasting
                    # logp[:, :K_SIZE**2, 0] = (logps[0, :, :, None] + logps[1, :, None, :]).reshape(BATCH_SIZE, K_SIZE**2)
                    # -----------------------------------------------------------------------------------
                    # Broadcast per-robot log-probs into the joint log-prob over
                    # all K_SIZE**N_ROBOTS joint actions.
                    # NOTE(review): hard-codes exactly 5 robots (logps[0..4]);
                    # requires N_ROBOTS == 5 — confirm against parameter.py.
                    tensor_0 = logps[0, :, :].unsqueeze(2).unsqueeze(3).unsqueeze(4).unsqueeze(5)  # (batch_size, K_SIZE, 1, 1, 1, 1)
                    tensor_1 = logps[1, :, :].unsqueeze(1).unsqueeze(3).unsqueeze(4).unsqueeze(5)  # (batch_size, 1, K_SIZE, 1, 1, 1)
                    tensor_2 = logps[2, :, :].unsqueeze(1).unsqueeze(2).unsqueeze(4).unsqueeze(5)  # (batch_size, 1, 1, K_SIZE, 1, 1)
                    tensor_3 = logps[3, :, :].unsqueeze(1).unsqueeze(2).unsqueeze(3).unsqueeze(5)  # (batch_size, 1, 1, 1, K_SIZE, 1)
                    tensor_4 = logps[4, :, :].unsqueeze(1).unsqueeze(2).unsqueeze(3).unsqueeze(4)  # (batch_size, 1, 1, 1, 1, K_SIZE)

                    # sum the independent per-robot log-probs via broadcasting
                    result = tensor_0 + tensor_1 + tensor_2 + tensor_3 + tensor_4
                    # print('result: ', result.shape)
                    # reshape so the flattened joint index matches the base-K_SIZE
                    # encoding used for action_batch above (robot 0 varies fastest
                    # — presumably; verify permute order against the encoding)
                    logp = result.permute(0, 5, 4, 3, 2, 1).reshape(BATCH_SIZE, K_SIZE**N_ROBOTS, -1)  # (batch_size, K_SIZE**N_ROBOTS, 1)
                    # -----------------------------------------------------------------------------------
                    # Policy objective: expectation over the joint distribution of
                    # (alpha * logp - Q), i.e. discrete-action SAC policy loss.
                    policy_loss = torch.sum((logp.exp() * (log_alpha.exp().detach() * logp - q_values.detach())), dim=1).mean()

                    # Cross-entropy regularizer pulling the policy toward the
                    # reference (heuristic) joint action, weighted by BETA.
                    beta = BETA
                    ce = torch.gather(logp, 1, reference_policy_batch)
                    # print('reference_policy_batch: ', reference_policy_batch.shape)
                    # print('ce: ', ce)
                    policy_loss -= beta * ce.mean()
                        
                    beta_loss = beta * ce.mean()
                    global_policy_optimizer.zero_grad()
                    policy_loss.backward()
                    policy_grad_norm = torch.nn.utils.clip_grad_norm_(global_policy_net.parameters(), max_norm=100, norm_type=2)
                    global_policy_optimizer.step()

                    # Bootstrapped target value from the target critics and the
                    # current policy evaluated at the next state.
                    with torch.no_grad():
                        next_q_values1, _ = dp_target_q_net1(q_net_next_node_inputs_batch, next_all_edge_inputs_batch, next_all_current_inputs_batch, next_node_padding_mask_batch, next_all_edge_padding_mask_batch, next_edge_mask_batch)
                        next_q_values2, _ = dp_target_q_net2(q_net_next_node_inputs_batch, next_all_edge_inputs_batch, next_all_current_inputs_batch, next_node_padding_mask_batch, next_all_edge_padding_mask_batch, next_edge_mask_batch)
                        next_q_values = torch.min(next_q_values1, next_q_values2)
                        
                        next_logps_list = []
                        for i in range(N_ROBOTS):
                            # print(all_edge_inputs_batch[:, :, i*K_SIZE:(i+1)*K_SIZE].shape, all_current_inputs_batch[:, :, i].shape, all_edge_padding_mask_batch[:, :, i*K_SIZE:(i+1)*K_SIZE].shape)
                            # NOTE(review): this call mixes next-state inputs with the
                            # CURRENT-state node_padding_mask_batch and edge_mask_batch
                            # (not next_node_padding_mask_batch / next_edge_mask_batch).
                            # Possibly intentional if the masks are static per episode
                            # — confirm.
                            next_logps_list.append(dp_policy(next_node_inputs_batch, next_all_edge_inputs_batch[:, :, i*K_SIZE:(i+1)*K_SIZE], next_all_current_inputs_batch[:, :, i].unsqueeze(2), node_padding_mask_batch, next_all_edge_padding_mask_batch[:, :, i*K_SIZE:(i+1)*K_SIZE], edge_mask_batch))

                        # next_logps = dp_policy(next_node_inputs_batch, next_all_edge_inputs_batch, next_all_current_inputs_batch, next_node_padding_mask_batch, next_all_edge_padding_mask_batch, next_edge_mask_batch)
                        next_logps = torch.stack(next_logps_list, dim=0)
                        next_logp = torch.zeros_like(next_q_values).to(device)
                        # -----------------------------------------------------------------------------------
                        # 3v1
                        # for i in range(K_SIZE):
                        #     for j in range(K_SIZE):
                        #         for k in range(K_SIZE):
                        #             next_logp[:, i*1 + j*K_SIZE + k*K_SIZE**2, 0] = next_logps[0, :, i] + next_logps[1, :, j] + next_logps[2, :, k]
                        
                        # 2v1
                        # for i in range(K_SIZE):
                        #     for j in range(K_SIZE):
                        #         next_logp[:, i*1 + j*K_SIZE, 0] = next_logps[0, :, i] + next_logps[1, :, j]
                        # -----------------------------------------------------------------------------------
                        # # best_j, best_k, best_l = np.unravel_index(idx, min_vs_for_each_jkl.shape)
                        # next_logp[:, :K_SIZE**2, 0] = (next_logps[0, :, :, None] + next_logps[1, :, None, :]).reshape(BATCH_SIZE, K_SIZE**2)
                        # -----------------------------------------------------------------------------------
                        # broadcasting — same 5-robot joint log-prob construction as above
                        tensor_0 = next_logps[0, :, :].unsqueeze(2).unsqueeze(3).unsqueeze(4).unsqueeze(5)  # (batch_size, K_SIZE, 1, 1, 1, 1)
                        tensor_1 = next_logps[1, :, :].unsqueeze(1).unsqueeze(3).unsqueeze(4).unsqueeze(5)  # (batch_size, 1, K_SIZE, 1, 1, 1)
                        tensor_2 = next_logps[2, :, :].unsqueeze(1).unsqueeze(2).unsqueeze(4).unsqueeze(5)  # (batch_size, 1, 1, K_SIZE, 1, 1)
                        tensor_3 = next_logps[3, :, :].unsqueeze(1).unsqueeze(2).unsqueeze(3).unsqueeze(5)  # (batch_size, 1, 1, 1, K_SIZE, 1)
                        tensor_4 = next_logps[4, :, :].unsqueeze(1).unsqueeze(2).unsqueeze(3).unsqueeze(4)  # (batch_size, 1, 1, 1, 1, K_SIZE)

                        # sum the independent per-robot log-probs via broadcasting
                        result = tensor_0 + tensor_1 + tensor_2 + tensor_3 + tensor_4
                        # print('result: ', result.shape)
                        # reshape to the joint-action layout
                        next_logp = result.permute(0, 5, 4, 3, 2, 1).reshape(BATCH_SIZE, K_SIZE**N_ROBOTS, -1)  # (batch_size, K_SIZE**N_ROBOTS, 1)

                        # -----------------------------------------------------------------------------------
                        # soft state value: E_pi[Q - alpha * logp] at the next state
                        value_prime_batch = torch.sum(next_logp.exp() * (next_q_values - log_alpha.exp() * next_logp), dim=1).unsqueeze(1)
                        # print('reward: ', reward_batch)
                        target_q_batch = reward_batch + GAMMA * (1 - done_batch) * value_prime_batch 

                    # critic updates: MSE between Q(s, a) and the soft TD target
                    mse_loss = nn.MSELoss()
                    # action_batch = action_batch.permute(0, 2, 1)
                    q_values1, _ = dp_q_net1(q_net_node_inputs_batch, all_edge_inputs_batch, all_current_inputs_batch, node_padding_mask_batch, all_edge_padding_mask_batch, edge_mask_batch)
                    q1 = torch.gather(q_values1, 1, action_batch)
                    q1_loss = mse_loss(q1, target_q_batch.detach()).mean()

                    global_q_net1_optimizer.zero_grad()
                    q1_loss.backward()
                    q_grad_norm = torch.nn.utils.clip_grad_norm_(global_q_net1.parameters(), max_norm=20000, norm_type=2)
                    global_q_net1_optimizer.step()
                    
                    q_values2, _ = dp_q_net2(q_net_node_inputs_batch, all_edge_inputs_batch, all_current_inputs_batch, node_padding_mask_batch, all_edge_padding_mask_batch, edge_mask_batch)
                    q2 = torch.gather(q_values2, 1, action_batch)
                    q2_loss = mse_loss(q2, target_q_batch.detach()).mean()

                    global_q_net2_optimizer.zero_grad()
                    q2_loss.backward()
                    q_grad_norm = torch.nn.utils.clip_grad_norm_(global_q_net2.parameters(), max_norm=20000, norm_type=2)
                    global_q_net2_optimizer.step()

                    # temperature update: here `entropy` is E[logp] (i.e. the
                    # NEGATIVE entropy), so the loss drives it toward -entropy_target
                    entropy = (logp * logp.exp()).squeeze(2).sum(dim=-1).mean()
                    alpha_loss = -(log_alpha * (entropy.detach() + entropy_target)).mean()
                    
                    log_alpha_optimizer.zero_grad()
                    alpha_loss.backward()
                    log_alpha_optimizer.step()
                    
                    target_q_update_counter += 1


                # data record to be written in tensorboard
                # (uses tensors/losses from the LAST of the 8 training iterations)
                perf_data = []
                for n in metric_name:
                    perf_data.append(np.nanmean(perf_metrics[n]))
                data = [reward_batch.mean().item(), value_prime_batch.mean().item(), policy_loss.item(), q1_loss.item(),
                        entropy.mean().item(), policy_grad_norm.item(), q_grad_norm.item(), log_alpha.item(), alpha_loss.item(), beta_loss.item(), *perf_data]
                training_data.append(data)
                len_train = len(training_data)
                newmaxreward = max(maxreward, data[0])

            # write record to tensorboard
            if len(training_data) >= SUMMARY_WINDOW and curr_episode % 15 == 0:
                writeToTensorBoard(writer, training_data, curr_episode)
                training_data = []
                perf_metrics = {}
                for n in metric_name:
                    perf_metrics[n] = []

            # get the updated global weights (moved to the worker device for
            # the next round of rollout jobs)
            weights_set = []
            if device != local_device:
                policy_weights = global_policy_net.to(local_device).state_dict()
                q_net1_weights = global_q_net1.to(local_device).state_dict()
                global_policy_net.to(device)
                global_q_net1.to(device)
            else:
                policy_weights = global_policy_net.to(local_device).state_dict()
                q_net1_weights = global_q_net1.to(local_device).state_dict()
            weights_set.append(policy_weights)
            weights_set.append(q_net1_weights)
            
            # update the target q net (hard sync every 64 training steps)
            if target_q_update_counter > 64:
                print("update target q net")
                target_q_update_counter = 1
                global_target_q_net1.load_state_dict(global_q_net1.state_dict())
                global_target_q_net2.load_state_dict(global_q_net2.state_dict())
                global_target_q_net1.eval()
                global_target_q_net2.eval()

            # save the model (periodically, and whenever the batch reward reaches
            # a new maximum)
            if curr_episode % 32 == 0 or newmaxreward >= maxreward:
                print('Saving model', end='\n')
                checkpoint = {"policy_model": global_policy_net.state_dict(),
                                "q_net1_model": global_q_net1.state_dict(),
                                "q_net2_model": global_q_net2.state_dict(),
                                "log_alpha": log_alpha,
                                "policy_optimizer": global_policy_optimizer.state_dict(),
                                "q_net1_optimizer": global_q_net1_optimizer.state_dict(),
                                "q_net2_optimizer": global_q_net2_optimizer.state_dict(),
                                "log_alpha_optimizer": log_alpha_optimizer.state_dict(),
                                "episode": curr_episode,
                                "policy_lr_decay": policy_lr_decay.state_dict(),
                                "q_net1_lr_decay": q_net1_lr_decay.state_dict(),
                                "q_net2_lr_decay": q_net2_lr_decay.state_dict(),
                                "log_alpha_lr_decay": log_alpha_lr_decay.state_dict()
                        }
                path_checkpoint = "./" + model_path + "/checkpoint.pth"
                torch.save(checkpoint, path_checkpoint)
                if newmaxreward > maxreward:
                    path_checkpoint = "./" + model_path + "/best_checkpoint_{}.pth".format(curr_episode)
                    torch.save(checkpoint, path_checkpoint)
                maxreward = newmaxreward
                print('Saved model', end='\n')

            # test the model
            # NOTE(review): `curr_episode % 1 == 0` is always true; evaluation is
            # gated only by len_train > 0 (i.e. at least one training pass done).
            if curr_episode % 1 == 0 and len_train > 0:
                test_worst_u, test_avg_u, test_heu_u, train_worst_u, train_avg_u, train_heu_u = run_test(curr_episode)
                test_worst_u_list.append(test_worst_u)
                test_avg_u_list.append(test_avg_u)
                test_heu_u_list.append(test_heu_u)

                train_worst_u_list.append(train_worst_u)
                train_avg_u_list.append(train_avg_u)
                train_heu_u_list.append(train_heu_u)


            if len_train >= SUMMARY_WINDOW and curr_episode % 15 == 0:
                write_test(writer, test_worst_u_list, test_avg_u_list, test_heu_u_list, train_worst_u_list, train_avg_u_list, train_heu_u_list, curr_episode)
                test_worst_u_list = []
                test_avg_u_list = []
                test_heu_u_list = []
                train_worst_u_list = []
                train_avg_u_list = []
                train_heu_u_list = []
                len_train = 0

        # NOTE(review): dead code — the `while True` loop above never breaks, so
        # this cleanup only ever runs via the KeyboardInterrupt handler below.
        for a in meta_agents:
            print("Killing remote workers")
            ray.kill(a)                
    
    except KeyboardInterrupt:
        print("CTRL_C pressed. Killing remote workers")
        for a in meta_agents:
            ray.kill(a)


# Script entry point: run the SAC training driver.
if __name__ == "__main__":
    main()
