
from gcl_configs import args
import os
from tqdm import tqdm
from mytools import function_tool as ft
from torch_geometric.loader import DataLoader

from mygcl import gcl_evaluation as gcl_ev, gcl_model as gclm, gcl_train_fun as tfg
from mywrite import writes as ow


# def set_seed(seed):
#     """Set seed"""
#     random.seed(seed)
#     np.random.seed(seed)
#     torch.manual_seed(seed)
#     if torch.cuda.is_available():
#         torch.cuda.manual_seed(seed)
#         torch.cuda.manual_seed_all(seed)
#         torch.backends.cudnn.deterministic = True
#         torch.backends.cudnn.benchmark = True
#         torch.backends.cudnn.enable = True
#     os.environ["PYTHONHASHSEED"] = str(seed)


def main(args):
    """Run one graph-contrastive-learning (GCL) experiment end to end.

    Loads the pickled graph dataset named by ``args``, trains the GCL
    encoder for ``args.epochs_gcl`` epochs, evaluates it on the same
    dataloader, and appends the micro/macro F1 scores to a results file.

    Expected attributes on ``args``: dataset, data_name, label_list,
    graph_name, device, epochs_gcl, exp_name, step, feature_dim,
    input_dim_encoder, hidden_dim_encoder, num_layers_encoder, and
    optionally batch_size (defaults to 32).
    """
    # set_seed(args.seed)
    dataset = args.dataset
    datasetname = args.data_name
    label_list = args.label_list
    graphname = args.graph_name
    device = args.device
    epochs_gcl = args.epochs_gcl
    exp_name = args.exp_name
    step = args.step
    # Batch size used to be hard-coded at the DataLoader call; allow override.
    batch_size = getattr(args, 'batch_size', 32)

    # Helper objects encapsulating training, evaluation and result writing.
    gcl_train_obj = tfg.train()
    gcl_test_obj = gcl_ev.evaluation()
    write_obj = ow.ownwrite()

    # --- paths ----------------------------------------------------------
    # Input: pickled graph data.
    original_data_path = '../{}/{}/{}/'.format(dataset, datasetname, graphname)
    pyg_graph_data_location = os.path.join(
        original_data_path, '{}.pkl'.format(graphname))

    # Output: GCL model weights.
    gcl_model_weights_path = 'model_weights/{}/{}/{}/{}/{}/'.format(
        exp_name, step, dataset, datasetname, graphname)
    # exist_ok=True is race-safe, unlike the exists()-then-makedirs() pattern.
    os.makedirs(gcl_model_weights_path, exist_ok=True)
    gcl_model_weights_location = os.path.join(
        gcl_model_weights_path, '{}_model.pkl'.format(graphname))

    # Output: learning results.
    # BUGFIX: the directory template had no trailing '/', so plain string
    # concatenation produced 'results/.../<graph><graph>.txt'; os.path.join
    # inserts the missing separator.
    gcl_learning_results_path = 'results/{}/{}/{}/{}/{}'.format(
        exp_name, step, dataset, datasetname, graphname)
    os.makedirs(gcl_learning_results_path, exist_ok=True)
    gcl_learning_results_location = os.path.join(
        gcl_learning_results_path, '{}.txt'.format(graphname))

    # --- data -----------------------------------------------------------
    graphs = ft.load_nx_to_pyg_for_gcl(
        pyg_graph_data_location, label_list, args.feature_dim)

    # --- model ----------------------------------------------------------
    input_dim = args.input_dim_encoder
    hidden_dim = args.hidden_dim_encoder
    num_layers = args.num_layers_encoder
    gcl_encoder_model, contrast_model, gcl_optimizer = gclm.model().get_model(
        input_dim, hidden_dim, num_layers, device)

    # --- training -------------------------------------------------------
    dataloader = DataLoader(graphs, batch_size=batch_size, shuffle=True)
    # (the previous `del dataset` only removed a local string and was dropped)
    with tqdm(total=epochs_gcl, desc='(Train)') as pbar:
        for epoch in range(1, epochs_gcl + 1):
            loss = gcl_train_obj.train(gcl_model_weights_location,
                                       gcl_encoder_model, contrast_model,
                                       dataloader, gcl_optimizer, device)
            pbar.set_postfix({'loss': loss})
            pbar.update()

    # --- evaluation -----------------------------------------------------
    gcl_test_result = gcl_test_obj.test(gcl_encoder_model, dataloader, device)
    # BUGFIX: the original message ended with a full-width comma ('，').
    print(f'(Test): Best test F1Mi={gcl_test_result["micro_f1"]:.4f}, '
          f'F1Ma={gcl_test_result["macro_f1"]:.4f}')
    write_lists = ['F1Mi', 'F1Ma']
    results = [gcl_test_result["micro_f1"], gcl_test_result["macro_f1"]]
    write_obj.write_np_list_to_txt(write_lists, results,
                                   gcl_learning_results_location)

    # # transform data to data with distance and new feature
    # gcl_encoder_model.load_state_dict(torch.load(gcl_model_weights_location))
    # dataset = ft.load_pyg_data_with_distance(pyg_graph_data_location, args.input_dim_encoder)
    # _ = ft.load_data_with_transformed_feature(dataset,original_data_path,transformed_data_filename, gcl_encoder_model,device)
    print('Finished training!')

if __name__ == '__main__':
    # Graph family cheat sheet:
    #   data_name  : 'crg_gnp_random_graph' | 'rpt_rt_tree_graph' | 'rc_bg_graph'
    #   graph_name : 'crg_gnp_p' (p = 0.2 .. 0.9) | 'rpt_rt' | 'rc_bg'
    #   label_list : ['crg', 'gnp'] | ['rpt', 'rt'] | ['rc', 'bg']
    # give the metric dimension of each graph

    def _configure(data_name, graph_name, label_list):
        """Populate ``args`` with the shared settings plus graph-specific ones."""
        args.exp_name = 'salmas'
        args.step = 'gcl'
        args.dataset = 'salmas_data_1'
        args.data_name = data_name
        args.graph_name = graph_name
        args.label_list = label_list
        args.feature_dim = 64
        args.input_dim_encoder = args.feature_dim
        args.epochs_gcl = 100

    # ////////////////////////----------EXPERIMENT SETUP----------////////////////////////////
    flag = 3  # select which experiment to run: 1, 2 or 3
    if flag == 1:
        _configure('rc_bg_graph', 'rc_bg', ['rc', 'bg'])
        main(args)
    elif flag == 2:
        _configure('rpt_rt_tree_graph', 'rpt_rt', ['rpt', 'rt'])
        main(args)
    elif flag == 3:
        # One run per edge probability p of the G(n, p) random graphs.
        for p in [0.9]:
            _configure('crg_gnp_random_graph', 'crg_gnp_{}'.format(p),
                       ['crg', 'gnp'])
            main(args)
