
# import sys
# sys.path.append('../common_code')

# Import the shared training configuration (args) — required by the project modules below.
from single_agent_train_configs import args

import os
from torch_geometric.loader import DataLoader
import pickle as pkl
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
# import copy

from mytools import rl_utils
from mytools import function_tool as ft
from Environments import environment_with_phero as envp
from PPO_sampler import PPO_SOLVER


# ------------------------------------------------------------------
# Experiment configuration.
# `flag` selects which graph family's training results to plot:
#   1 -> connected G(n, p) random graphs with p = 0.9  (label 'CG09')
#   2 -> 'rc_bg' graphs                                 (label 'RB')
#   3 -> 'rpt_rt' tree graphs                           (label 'RR')
# (Family semantics inferred from the dataset names — confirm against
#  the data-generation code.)
# ------------------------------------------------------------------
exp_name = 'salmas'
step = 'single_train'
total_dataset = 'salmas_data_1'

flag = 1

if flag == 1:
    data_name = 'crg_gnp_random_graph'
    graph_name = 'crg_gnp_{}'.format(0.9)
    label = 'CG09'
elif flag == 2:
    data_name = 'rc_bg_graph'
    graph_name = 'rc_bg'
    label = 'RB'
elif flag == 3:
    data_name = 'rpt_rt_tree_graph'
    graph_name = 'rpt_rt'
    label = 'RR'
else:
    # Fail fast: without this branch, data_name/graph_name/label would be
    # undefined and the script would crash later with a confusing NameError.
    raise ValueError('Unknown flag: {}'.format(flag))

# ------------------------------------------------------------------
# Locations of the per-episode metrics saved during training.
# Each *_location is training_results_path + the corresponding file
# name; only return_list_location is read by this script, the others
# mirror what the trainer writes.
# ------------------------------------------------------------------
training_results_path = 'results/{}/{}/{}/{}/{}/'. \
    format(exp_name, step, total_dataset, data_name, graph_name)

episode_best_dim_list_file = 'episode_best_dim_list.txt'
episode_best_dim_list_location = training_results_path + episode_best_dim_list_file
episode_Q_list_file = 'episode_Q_list.txt'
episode_Q_list_location = training_results_path + episode_Q_list_file
episode_ave_dim_list_file = 'episode_ave_dim_list.txt'
episode_ave_dim_list_location = training_results_path + episode_ave_dim_list_file
episode_ave_Q_list_file = 'episode_ave_Q_list.txt'
episode_ave_Q_list_location = training_results_path + episode_ave_Q_list_file
moving_average_return_list_file = 'moving_average_return_list.txt'
moving_average_return_list_location = training_results_path + moving_average_return_list_file
return_list_file = 'return_list.txt'
return_list_location = training_results_path + return_list_file
episode_pheromone_guide_strength_file = 'episode_pheromone_guide_strength.txt'
episode_pheromone_guide_strength_location = training_results_path + episode_pheromone_guide_strength_file

# NOTE(review): the original file names below contained a stray space before
# '.txt' ('episode_repair_guide_strength .txt', 'episode_proper_reward .txt').
# Removed here as an apparent typo — confirm the trainer writes these files
# without the space before relying on these paths.
episode_repair_guide_strength_file = 'episode_repair_guide_strength.txt'
episode_repair_guide_strength_location = training_results_path + episode_repair_guide_strength_file
episode_proper_reward_file = 'episode_proper_reward.txt'
episode_proper_reward_location = training_results_path + episode_proper_reward_file


# Load the per-episode return curve written by the trainer and render it
# as a single line plot, saved alongside the other training results.
data = np.loadtxt(return_list_location)

fig1 = plt.figure()
ax11 = fig1.add_subplot(111)
# x axis is simply the episode index (0..len-1).
ax11.plot(range(len(data)), data, label=label)
ax11.set_xlabel('Episodes')
ax11.set_ylabel('Average rewards')
plt.legend()

# Save next to the metrics files; one PNG per graph configuration.
st_file = 'stable_learning_{}.png'.format(graph_name)
plt.savefig(training_results_path + st_file)
#
# # plot reward list
# fig2 = plt.figure()
# # plt.title('REINFORCE on {}'.format(env_name))
# ax21 = fig2.add_subplot(121)
# episodes_list = list(range(len(return_list)))
# ax21.plot(episodes_list, return_list)
# ax21.set_xlabel('Episodes')
# ax21.set_ylabel('Returns')
#
# ax22 = fig2.add_subplot(122)
# mv_return = rl_utils.moving_average(return_list, 9)
# ax22.plot(episodes_list, mv_return)
# ax22.set_xlabel('Episodes')
# ax22.set_ylabel('Moving average returns')
# #plt.legend()
# plt.tight_layout()
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.05)
# plt.savefig(training_results_path+'reward_infor.png')
# #plt.show()

# if __name__ == '__main__':
#     """
#     datasetname = 'crg_gnp_random_graph'  # 'crg_gnp_random_graph', 'rpt_rt_tree_graph','rc_bg_graph'
#     graphname = 'crg_gnp_0.2'  # 'crg_gnp_p' p=0.2~0.9. 'rpt_rt','rc_bg'
#     label_list = ['crg', 'gnp']  # ['crg', 'gnp'], ['rpt', 'rt'], ['rc', 'bg']
#     give the metric dimension of each graph
#     """
#     # ////////////////////////----------EXPERIMENT SETUP----------////////////////////////////
#     # experiment
#
#     args.data_name = 'crg_gnp_random_graph'
#     args.label_list = ['crg', 'gnp']
#     p = 0.9
#     args.graph_name = 'crg_gnp_{}'.format(p)
#
#     # args.data_name = 'rc_bg_graph'
#     # args.label_list = ['rc', 'bg']
#     # args.graph_name = 'rc_bg'
#
#     # args.data_name = 'rpt_rt_tree_graph'
#     # args.label_list =  ['rpt', 'rt']
#     # args.graph_name = 'rpt_rt'
#
#     args.feature_dim = 64
#     args.hidden_dim = 64
#     args.gamma = 0.98
#
#     args.early_stop = 50
#     args.time_step = 3  # 3
#     args.num_episodes = 1000
#
#
#
#     args.exp_name = 'salmas'
#     args.step = 'single_train'
#     args.dataset = 'salmas_data_1'
#
#     args.with_feature = True
#     args.use_gnn = False
#
#     args.l = -1 # -(1e-20)
#     args.u = 1 # 1e-20
#     args.Tolerance_time = 300.0
#
#     args.ppo_lmbda = 0.1
#     args.ppo_inner_epochs = 3
#     args.ppo_eps = 0.2
#     args.max_norm = 60.0
#
#
#
#     args.critic_lr = 0.01
#     args.actor_lr = 0.01
#     args.env_lr = 0.01
#     args.rau = 0.95
#     # for single agent training
#     args.add_iter = 1
#     args.reduce_iter = 1
#     #
#     args.low_pher = 0
#     args.high_pher = 1000.0
#
#     main(args)



