# import sys
# sys.path.append('../common_code')

# Import the experiment configs here if they are needed.
from single_agent_train_configs import args

import os
from torch_geometric.loader import DataLoader
import pickle as pkl
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
# import copy

from mytools import rl_utils
from mytools import function_tool as ft
from Environments import environment_with_phero as envp
from PPO_sampler import PPO_SOLVER


def main(args):
    """Train a single-agent PPO sampler on a pre-transformed graph dataset.

    All hyper-parameters are read from ``args``.  The function builds a
    seeded train/test split, runs the episode training loop, writes
    per-episode metrics to text files under ``results/...``, and saves the
    best / last actor weights under ``model_weights/...``.

    Args:
        args: a namespace (from ``single_agent_train_configs``) carrying all
            experiment, model and environment hyper-parameters.
    """
    for k, v in sorted(vars(args).items()):
        print(k, '=', v)

    # Pick the sampler implementation: GCN-based or MLP-based PPO.
    if args.use_gnn:
        from PPO_sampler import GCN_PPO as rein
    else:
        from PPO_sampler import MLP_PPO as rein

    dataset_name = args.data_name
    graph_name = args.graph_name
    total_dataset = args.dataset
    exp_name = args.exp_name
    step = args.step
    device = args.device

    single_train = True

    # Pheromone-environment parameters.
    rau, add_iter, reduce_iter, low_pher, high_pher = \
        args.rau, args.add_iter, args.reduce_iter, args.low_pher, args.high_pher
    critic_learning_rate = args.critic_lr
    actor_learning_rate = args.actor_lr
    num_episodes = args.num_episodes
    time_step = args.time_step
    early_stop = args.early_stop
    feature_dim = args.feature_dim
    hidden_dim = args.hidden_dim
    gamma = args.gamma
    low = args.l
    high = args.u
    use_GNN = args.use_gnn
    with_feature = args.with_feature

    Tolerance_time = args.Tolerance_time
    max_norm = args.max_norm
    lmbda = args.ppo_lmbda
    ppo_inner_epochs = args.ppo_inner_epochs
    ppo_eps = args.ppo_eps

    # Sampler model weights location (created on demand; exist_ok avoids the
    # check-then-create race of the original exists()/makedirs() pair).
    sampler_model_weights_path = 'model_weights/{}/{}/{}/{}/{}/'. \
        format(exp_name, step, total_dataset, dataset_name, graph_name)
    os.makedirs(sampler_model_weights_path, exist_ok=True)

    sampler_best_model_location = (
        sampler_model_weights_path + '{}_best_model.pkl'.format(graph_name))
    sampler_last_model_location = (
        sampler_model_weights_path + '{}_last_model.pkl'.format(graph_name))

    # Solving results to save.
    training_results_path = 'results/{}/{}/{}/{}/{}/'.\
        format(exp_name, step, total_dataset, dataset_name, graph_name)
    os.makedirs(training_results_path, exist_ok=True)

    # Load the pre-transformed PyG graphs and make a seeded train/test split.
    original_data_path = '../{}/{}/{}/'.format(total_dataset, dataset_name, graph_name)
    transformed_pyg_graph_data_location = (
        original_data_path + '{}_transform.pkl'.format(graph_name))
    with open(transformed_pyg_graph_data_location, 'rb') as fh:
        dataset = pkl.load(fh)
    data_index = np.random.RandomState(seed=args.seed).permutation(len(dataset))
    data_index, train_index, test_index, train_num, test_num = \
        ft.cut_index_2(data_index, args.train_ratio)
    print('----------- data index infor -------------')
    print('data_index =', data_index)
    print('train_index =', train_index)
    print('test_index =', test_index)
    print('------------------------------------------')

    def _save_index(filename, index):
        # Persist one index per line so the split can be reproduced later.
        with open(training_results_path + filename, 'w') as fh:
            for i in index:
                fh.write(str(i) + '\n')

    _save_index('data_index.txt', data_index)
    _save_index('train_index.txt', train_index)
    _save_index('test_index.txt', test_index)

    train_data = [dataset[i].to(device) for i in train_index]
    train_dataloader = DataLoader(train_data, batch_size=32, shuffle=True)

    # Agent state: [node feature | aggregated feature | pheromone] -> 2*d + 1.
    # NOTE(review): assumed from state_dim arithmetic — confirm against
    # the environment's state construction.
    state_dim = feature_dim * 2 + 1
    agent = rein.PPO(state_dim, hidden_dim, actor_learning_rate, critic_learning_rate,
                     lmbda, ppo_inner_epochs, ppo_eps, gamma, device, max_norm)

    # Per-episode metric files.
    # NOTE(review): the last two names contain a space before '.txt' in the
    # original; kept byte-identical so downstream tooling keeps finding them.
    metric_locations = {
        'best_dim': training_results_path + 'episode_best_dim_list.txt',
        'best_Q': training_results_path + 'episode_Q_list.txt',
        'ave_dim': training_results_path + 'episode_ave_dim_list.txt',
        'ave_Q': training_results_path + 'episode_ave_Q_list.txt',
        'return': training_results_path + 'return_list.txt',
        'pheromone': training_results_path + 'episode_pheromone_guide_strength.txt',
        'repair': training_results_path + 'episode_repair_guide_strength .txt',
        'proper': training_results_path + 'episode_proper_reward .txt',
    }

    def _log_metrics(values, first):
        # Truncate every metric file on the first episode, append afterwards.
        # Replaces the original's duplicated 'w+'/'a+' branches.
        mode = 'w' if first else 'a'
        for key, value in values.items():
            with open(metric_locations[key], mode) as fh:
                fh.write(str(value) + '\n')

    best_return = -1e+30
    current = 0          # episodes since last improvement (early-stop counter)
    train_first = True
    n_train = len(train_index)
    for i_episode in range(num_episodes):
        episode_return = 0
        i_ave_dim = 0
        i_ave_Q = 0
        i_ave_best_dim = 0
        i_ave_best_Q = 0
        i_ave_t_pheromone_guide_strength = 0
        i_ave_t_repair_guide_strength = 0
        i_ave_t_proper_reward = 0

        print('i_episode:', i_episode)
        for batch_data in train_dataloader:
            batch_return = 0
            ave_dim = 0
            ave_Q = 0
            ave_best_dim = 0
            ave_best_Q = 0
            ave_t_pheromone_guide_strength = 0
            ave_t_repair_guide_strength = 0
            ave_t_proper_reward = 0

            # Gradients accumulate across every graph in the batch and are
            # applied with a single optimizer step below.
            agent.actor_optimizer.zero_grad()
            agent.critic_optimizer.zero_grad()

            for j in range(len(batch_data)):
                # Note that gradients are computed on each graph separately.
                env = envp.RandomSampling(batch_data[j], use_GNN, with_feature,
                                          low, high, rau, add_iter,
                                          reduce_iter, low_pher, high_pher, device)

                g_return, g_best_dim, g_Q, \
                    g_best_solution, g_ave_dim, g_ave_Q, \
                    g_t_pheromone_guide_strength, g_t_repair_guide_strength, g_proper_reward \
                    = PPO_SOLVER.mlp_solve(env, agent, single_train, time_step, Tolerance_time)

                batch_return += g_return
                ave_dim += g_ave_dim
                ave_Q += g_ave_Q
                ave_best_dim += g_best_dim
                ave_best_Q += g_Q
                ave_t_repair_guide_strength += g_t_repair_guide_strength
                ave_t_pheromone_guide_strength += g_t_pheromone_guide_strength
                ave_t_proper_reward += g_proper_reward

            agent.actor_optimizer.step()
            agent.critic_optimizer.step()

            # Running per-episode averages over the full training set.
            episode_return += batch_return / n_train
            i_ave_dim += ave_dim / n_train
            i_ave_Q += ave_Q / n_train
            i_ave_best_dim += ave_best_dim / n_train
            i_ave_best_Q += ave_best_Q / n_train
            i_ave_t_repair_guide_strength += ave_t_repair_guide_strength / n_train
            i_ave_t_pheromone_guide_strength += ave_t_pheromone_guide_strength / n_train
            i_ave_t_proper_reward += ave_t_proper_reward / n_train

        if best_return < episode_return:
            best_return = episode_return
            current = 0
            # Save best model: actor.
            torch.save(agent.actor.state_dict(), sampler_best_model_location)
        else:
            current += 1

        _log_metrics({
            'best_dim': i_ave_best_dim,
            'best_Q': i_ave_best_Q,
            'ave_dim': i_ave_dim,
            'ave_Q': i_ave_Q,
            'return': episode_return,
            'pheromone': i_ave_t_pheromone_guide_strength,
            'repair': i_ave_t_repair_guide_strength,
            'proper': i_ave_t_proper_reward,
        }, train_first)

        train_first = False

        if current > early_stop:
            break

    # Save last model: actor.
    torch.save(agent.actor.state_dict(), sampler_last_model_location)

if __name__ == '__main__':
    """
    datasetname = 'crg_gnp_random_graph'  # 'crg_gnp_random_graph', 'rpt_rt_tree_graph','rc_bg_graph'
    graphname = 'crg_gnp_0.2'  # 'crg_gnp_p' p=0.2~0.9. 'rpt_rt','rc_bg'
    label_list = ['crg', 'gnp']  # ['crg', 'gnp'], ['rpt', 'rt'], ['rc', 'bg']
    give the metric dimension of each graph
    """
    # ////////////////////////----------EXPERIMENT SETUP----------////////////////////////////
    # experiment

    args.data_name = 'crg_gnp_random_graph'
    args.label_list = ['crg', 'gnp']
    p = 0.9
    args.graph_name = 'crg_gnp_{}'.format(p)

    # args.data_name = 'rc_bg_graph'
    # args.label_list = ['rc', 'bg']
    # args.graph_name = 'rc_bg'

    # args.data_name = 'rpt_rt_tree_graph'
    # args.label_list =  ['rpt', 'rt']
    # args.graph_name = 'rpt_rt'

    args.feature_dim = 64
    args.hidden_dim = 64
    args.gamma = 0.98

    args.early_stop = 50
    args.time_step = 3  # 3
    args.num_episodes = 1000



    args.exp_name = 'salmas'
    args.step = 'single_train'
    args.dataset = 'salmas_data_1'

    args.with_feature = True
    args.use_gnn = False

    args.l = -1 # -(1e-20)
    args.u = 1 # 1e-20
    args.Tolerance_time = 300.0
    
    args.ppo_lmbda = 0.1
    args.ppo_inner_epochs = 3
    args.ppo_eps = 0.2
    args.max_norm = 60.0


    
    args.critic_lr = 0.01
    args.actor_lr = 0.01
    args.env_lr = 0.01
    args.rau = 0.95
    # for single agent training
    args.add_iter = 1
    args.reduce_iter = 1
    #
    args.low_pher = 0
    args.high_pher = 1000.0

    main(args)


