import sys
sys.path.append('../SALMAS')

# Import the shared configs module here if it is needed.
import copy
from single_agent_train_configs import args
import random

import os
from torch_geometric.loader import DataLoader
import pickle as pkl
import matplotlib

matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
# import copy

from mytools import rl_utils
from mytools import function_tool as ft
from Environments import environment_with_phero as envp
from PPO_sampler import generate_agents as gen_agent
from PPO_sampler import MLP_PPO as rein
from PPO_sampler import PPO_SOLVER


def main(args) -> None:
    """Solve (test) metric-dimension instances with a perturbed PPO agent population.

    Loads a pre-trained PPO sampler actor, builds an agent population by weight
    perturbation, then runs the SALMAS iterative solver on each test graph,
    writing per-graph dimension traces under ``results/...``.

    Args:
        args: argparse-style namespace; see the ``__main__`` block for the
            full list of expected attributes (paths, PPO hyper-parameters,
            pheromone/environment settings, population settings).
    """
    for k, v in sorted(vars(args).items()):
        print(k, '=', v)

    dataset_name = args.data_name
    graph_name = args.graph_name
    total_dataset = args.dataset
    exp_name = args.exp_name
    step = args.step

    device = args.device

    # environment / pheromone parameters
    rau, add_iter, reduce_iter, low_pher, high_pher = \
        args.rau, args.add_iter, args.reduce_iter, args.low_pher, args.high_pher
    critic_learning_rate = args.critic_lr
    actor_learning_rate = args.actor_lr
    num_episodes = args.num_episodes
    time_step = args.time_step
    feature_dim = args.feature_dim
    hidden_dim = args.hidden_dim
    gamma = args.gamma
    low = args.l
    high = args.u
    use_GNN = args.use_gnn
    with_feature = args.with_feature

    Tolerance_time = args.Tolerance_time
    max_norm = args.max_norm
    lmbda = args.ppo_lmbda
    ppo_inner_epochs = args.ppo_inner_epochs
    ppo_eps = args.ppo_eps

    sim_eps = args.sim_eps
    population_size = args.population_size
    low_per = args.low_per
    high_per = args.high_per
    single_train = args.single_train
    max_iteration = args.max_iteration
    solve_early_stop = args.solve_early_stop
    perturbation = args.perturbation

    # sampler model weights location (created if missing; exist_ok avoids
    # the check-then-create race of os.path.exists + os.makedirs)
    sampler_model_weights_path = 'model_weights/{}/{}/{}/{}/{}/'. \
        format(exp_name, step, total_dataset, dataset_name, graph_name)
    os.makedirs(sampler_model_weights_path, exist_ok=True)

    sampler_best_model_file = '{}_best_model.pkl'.format(graph_name)
    sampler_best_model_location = sampler_model_weights_path + sampler_best_model_file

    # solving results to save
    training_results_path = 'results/{}/{}/{}/{}/{}/'. \
        format(exp_name, step, total_dataset, dataset_name, graph_name)
    os.makedirs(training_results_path, exist_ok=True)

    # data information: load the pre-transformed PyG graph dataset
    original_data_path = '../{}/{}/{}/'.format(total_dataset, dataset_name, graph_name)
    transformed_data_filename = '{}_transform.pkl'.format(graph_name)
    # transformed_data_filename = '{}_struc2vec_transform.pkl'.format(graph_name)
    transformed_pyg_graph_data_location = original_data_path + transformed_data_filename
    # 'with' guarantees the file handle is closed even if unpickling fails
    with open(transformed_pyg_graph_data_location, 'rb') as f:
        dataset = pkl.load(f)

    train_index_file = 'train_index.txt'
    train_index_location = training_results_path + train_index_file
    test_index_file = 'test_index.txt'
    test_index_location = training_results_path + test_index_file

    # number of graphs sampled for solving; also the number of per-graph
    # result files created below — keep the two in sync
    sample_num = 25

    # NOTE(review): solve_flag is hard-coded to 'test'; the 'train' branch
    # below is currently dead code kept for switching modes manually.
    solve_flag = 'test'
    train_data = []
    test_data = []
    if solve_flag == 'train':
        train_index = np.loadtxt(train_index_location)
        train_index = np.random.choice(train_index, sample_num, replace=False)
        print('train_index: ', train_index)
        for i in train_index:
            train_data.append(dataset[int(i)].to(device))
    if solve_flag == 'test':
        test_index = np.loadtxt(test_index_location)
        test_index = np.random.choice(test_index, sample_num, replace=False)
        print('test_index: ', test_index)
        for i in test_index:
            test_data.append(dataset[int(i)].to(device))

    test_dataloader = DataLoader(test_data, batch_size=1, shuffle=False)

    # agent: state is [own feature | neighbor feature | pheromone scalar]
    # — presumably; TODO confirm against the PPO actor definition
    state_dim = feature_dim * 2 + 1
    agent = rein.PPO(state_dim, hidden_dim, actor_learning_rate, critic_learning_rate,
                     lmbda, ppo_inner_epochs, ppo_eps, gamma, device, max_norm)

    agent.actor.load_state_dict(torch.load(sampler_best_model_location))

    # generate agents: agent population containing the original agent
    agents = gen_agent.perturbation_agent_population(
        agent, sim_eps, population_size, low_per, high_per, perturbation, device)

    # one per-graph result file for each sampled graph
    location_list = []
    for i in range(sample_num):
        solve_single_graph_dim_file = 'solve_single_graph_dim_{}.txt'.format(i)
        solve_single_dim_location = training_results_path + solve_single_graph_dim_file
        location_list.append(solve_single_dim_location)

    count = 0
    for batch_data in test_dataloader:
        # note that, compute grad on each graph
        env = envp.RandomSampling(batch_data[0], use_GNN, with_feature, low, high, rau, add_iter,
                                  reduce_iter, low_pher, high_pher, device)

        _, _, _ = PPO_SOLVER.salmas_iteration_solve_on_single_graph(
            env, agents, single_train, time_step, Tolerance_time, max_iteration,
            solve_early_stop,
            location_list,
            count)
        count += 1

    print('Finished to solve.')



if __name__ == '__main__':
    # Experiment setup.
    # data_name options: 'crg_gnp_random_graph', 'rpt_rt_tree_graph', 'rc_bg_graph'
    # graph_name options: 'crg_gnp_p' (p = 0.2..0.9), 'rpt_rt', 'rc_bg'
    # label_list options: ['crg', 'gnp'], ['rpt', 'rt'], ['rc', 'bg']
    # Goal: give the metric dimension of each graph.

    overrides = {
        # solver mode
        'single_train': False,
        'max_iteration': 1000,
        'solve_early_stop': 1000,
        'perturbation': True,

        # dataset selection (random perfect trees)
        'data_name': 'rpt_rt_tree_graph',
        'label_list': ['rpt', 'rt'],
        'graph_name': 'rpt_rt',

        # model dimensions / discount
        'feature_dim': 64,
        'hidden_dim': 64,
        'gamma': 0.98,

        'time_step': 5,  # 3

        # experiment bookkeeping (controls output paths)
        'exp_name': 'salmas',
        'step': 'single_train',
        'dataset': 'salmas_data_1',

        'with_feature': True,
        'use_gnn': False,

        # action bounds and time budget
        'l': -1,  # -(1e-20)
        'u': 1,   # 1e-20
        'Tolerance_time': 300.0,

        # PPO hyper-parameters
        'ppo_lmbda': 0.5,
        'ppo_inner_epochs': 3,
        'ppo_eps': 0.2,
        'max_norm': 60.0,

        # learning rates
        'critic_lr': 0.01,
        'actor_lr': 0.01,
        'env_lr': 0.01,

        # pheromone environment settings
        'rau': 0.95,
        'add_iter': 1,      # for single agent training
        'reduce_iter': 1,
        'low_pher': 0,
        'high_pher': 1000.0,

        # agent population perturbation settings
        'sim_eps': 0.95,
        'population_size': 6,
        'low_per': -1,
        'high_per': 1,
    }
    for name, value in overrides.items():
        setattr(args, name, value)

    main(args)



