#!/usr/bin/env python
# coding: utf-8

# In[1]:


"""Script for experiments with standard learning with GNNs (including GIB-GAT, GAT, GCN and other baselines.)"""
import argparse
import ast
import datetime
import os
import sys

import numpy as np  # numpy functions are used via the np. prefix
import torch

# sys.path.append("G:\\2020-11-08-GIB\\GIB-master\\GIB")
sys.path.append(os.path.join(os.path.dirname("__file__"), '..'))
sys.path.append(os.path.join(os.path.dirname("__file__"), '..', '..'))

from GIB.experiments.GIB_node_model import GNN, get_data, train, train_baseline
from GIB.pytorch_net.util import Beta_Function, str2bool, eval_tuple, \
    to_string, filter_filename
from GIB.util import add_distant_neighbors, process_data_for_nettack, GIB_PATH
from GIB.DeepRobust.deeprobust.graph.defense import GCNJaccard
from GIB.DeepRobust.deeprobust.graph.defense import RGCN

# ## Settings:

# In[2]:

"""
Successful  Typical GIB-Cat setting: model_type="GAT", beta1=0.001, beta2=0.1, struct_dropout_mode=("Nsampling",'multi-categorical-sum',0.1,3) (or ("DNsampling",'multi-categorical-sum',0.1,3,2));
Successful  Typical GIB-Bern setting:model_type="GAT", beta1=0.001, beta2=0.1, struct_dropout_mode=("Nsampling",'Bernoulli',0.1,0.5,"norm") (or ("DNsampling",'Bernoulli',0.1,0.5,"norm",2));
Successful  Standard GAT setting:    model_type="GAT", beta1=-1,    beta2=-1,  struct_dropout_mode=("standard",0.6);
Successful  Standard GCN setting:    model_type="GCN", beta1=-1,    beta2=-1
Successful  RGCN setting:            model_type="RGCN"
Successful  GCNJaccard setting:      model_type="GCNJaccard"
"""
exp_id = "exp1.0"  # Experiment id, used for the directory name saving the experiment result files.
data_type = 'Cora'  # Data type. Choose from "Cora", "Pubmed", "citeseer"
model_type = 'GAT'  # Name of the base model. Choose from "GAT", "GCN", 'GCNJaccard', 'RGCN'.
# For GIB-Cat and GIB-Bern, still choose model_type="GAT", but set either beta1 or beta2 nonzero.
beta1 = 0.001  #  XIB项的系数. If -1, 这项将没有.
beta2 = 0.1  # AIB项的系数. If -1,这个项的系数为0(但是根据“struct_dropout_mode”，仍然可以执行抽样)
struct_dropout_mode = "(\"Nsampling\",'multi-categorical-sum',0.1,3)"  # 用于表示如何生成结构化表示(图表示)的方式.
# For GIB-Cat, choose from ("Nsampling", 'multi-categorical-sum', 0.1, 3) (here 0.1 is temperature（temperature的大小和模型最终模型的正确率没有直接关系，我们可以将t的作用类比于学习率。我们的label类似于[1,0,0]，最“尖锐”，如果我们在训练时将t设置比较大，那么预测的概率分布会比较平滑，那么loss会很大，这样可以避免我们陷入局部最优解。随着训练的进行，我们将t变小，也可以称作降温，类似于模拟退火算法，这也是为什么要把t称作温度参数的原因。变小模型才能收敛）, k=3为被替换的抽样邻近边的个数), and ("DNsampling", 'multi-categorical-sum', 0.1, 3, 2) (similar as above, but with 2-hop neighbors)
# For GIB-Bern, choose from ("Nsampling",'Bernoulli',0.1,0.5,"norm") (here 0.1 is temperature, 0.5 是伯努利的先验概率，先验概率是指根据以往经验和分析得到的概率)and ("DNsampling",'Bernoulli',0.1,0.5,"norm",2) (with 2-hop neighbors)
# For standard GAT, choose from ("standard", 0.6) (在GAT中使用标准的注意力权值衰减)and ("standard", 0.6, 2) (with 2-hop neighbors)
train_fraction = 1.  # 为训练集保留的训练标签的比例. Default 1, 意思是保留全部标签.
added_edge_fraction = 0.  # 添加(或删除)随机边的分数。使用正(负)数随机添加(删除)边。. Default 0.
feature_noise_ratio = 0.  # 特征上添加的独立的高斯噪声比.
latent_size = 16  # GCN-based or GAT-based 模型的隐藏层维数.
sample_size = 1  # 每个节点X抽样多少个Z.表示GIB得到的图表示的节点数等于原来数据的节点数
num_layers = 2  # Number of layers for the GNN.
reparam_mode = "diag"  # XIB的重新参数化模式. Choose from "None", "diag" or "full"  # diag表示矩阵对角元素提取（diag是一种函数，指的是只保留原矩阵的主对角线的元素，其余的元素以零取代，默认的“diag”参数化高斯函数的平均值和对角线元素
prior_mode = "mixGau-100"  # 前的分布类型。 Choose from "Gaussian" or "mixGau-100" (100个高斯分量的混合)  ?
is_anneal_beta = True  # 是否让 beta1 and beta2在训练过程中从0递增. Default True.
val_use_mean = True  # 评估时是否使用参数值而不是采样。如果为真，在评估过程中，XIB使用平均值进行预测，AIB使用分类分布的参数进行预测
reparam_all_layers = "(-2,)"  # 使用XIB的层，例如(1,2,4)。默认值(-2，)，表示倒数第二层。如果为真，则对所有层使用XIB。使用XIB即为对节点特征应用信息瓶颈理论
epochs = 5  # Number of epochs. Default 2000
lr = -1  # Learning rate. If -1, use default learning rate for each model
weight_decay = -1  # weight decay. If -1, use default weight decay for each model
date_time = "{0}-{1}".format(datetime.datetime.now().month,
                             datetime.datetime.now().day)  # Today's month and day. Used for the directory name saving the experiment result files.
seed = 0  # Random seed.
save_best_model = True  # Whether to save the model with the best validation accuracy.
skip_previous = False  # If True, 如果相同的设置已经训练过，会跳过训练.
is_cuda = "0"  # CUDA device. Choose from False, or "cuda:${NUMBER}", where the ${NUMBER} is the GPU id.
threshold = 0.05  # threshold for GCNJaccard.
gamma = 0.5  # gamma for RGCN

try:
    # If the current environment is a Jupyter notebook, `get_ipython` exists
    # and the magic call succeeds.  In a plain terminal it raises NameError,
    # which routes execution to the argparse branch below.  Do NOT comment
    # this call out: without the exception, `args` would never be created.
    get_ipython().run_line_magic('matplotlib', 'inline')
    import matplotlib.pylab as pl
    isplot = True    # whether to visualize with plots
except (NameError, ImportError):
    # Narrowed from a bare `except:` so that SystemExit / KeyboardInterrupt
    # are not silently swallowed.  NameError: not running under IPython;
    # ImportError: matplotlib.pylab unavailable.
    # If the current environment is a terminal, pass in settings from the command line:
    import matplotlib
    isplot = True
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_id', default=exp_id, help='experiment ID')
    parser.add_argument('--data_type', help='Data type: choose from PROTEINS.', default=data_type)
    parser.add_argument('--model_type', default=model_type, help='Model type: GCN or GAT or GCNJaccard or RGCN')
    parser.add_argument('--train_fraction', type=float, default=train_fraction, help='train_fraction')
    parser.add_argument('--added_edge_fraction', type=float, default=added_edge_fraction,
                        help='Fraction of added edges.')
    parser.add_argument('--feature_noise_ratio', type=float, default=feature_noise_ratio,
                        help='Relative amplitude of feature Gaussian noise')
    parser.add_argument('--beta1', type=float, default=beta1,
                        help='beta1 value for feature IB, set a float value >= 0.')
    parser.add_argument('--beta2', type=float, default=beta2,
                        help='beta2 value for structure IB, set a float value >= 0.')
    parser.add_argument('--latent_size', type=int, default=latent_size, help='latent_size')
    parser.add_argument('--sample_size', type=int, default=sample_size, help='sample_size')
    parser.add_argument('--num_layers', type=int, default=num_layers, help='num_layers')
    parser.add_argument('--reparam_mode', default=reparam_mode, help='diag, diagg, or full')
    parser.add_argument('--prior_mode', default=prior_mode, help='prior mode for VIB')
    parser.add_argument('--struct_dropout_mode', default=struct_dropout_mode, help='mode for structure dropout.')
    # Accepted --struct_dropout_mode formats:
    '''
    'Nsampling, categorical/subset/multi-categorical-sum/multi-categorical-max, temperature, sample-neighbor-size'
    'Nsampling, Bernoulli, temperature, prior (0~1)', 'norm'/'none'
    'DNsampling, categorical/subset/multi-categorical-sum/multi-categorical-max, temperature, sample-neighbor-size, hops'
    'DNsampling, Bernoulli, temperature, prior (0~1), 'norm'/'none', hops'
    'standard, 0.6'
    'standard, 0.6, hops'
    '''
    parser.add_argument('--is_anneal_beta', type=str2bool, nargs='?', const=True, default=is_anneal_beta,
                        help='Whether to anneal beta.')
    parser.add_argument('--val_use_mean', type=str2bool, nargs='?', const=True, default=val_use_mean,
                        help='Whether to use mean of Z during validation.')
    parser.add_argument('--reparam_all_layers', type=str, default=reparam_all_layers,
                        help='Whether to reparameterize all layers.')
    parser.add_argument('--epochs', type=int, default=epochs, help="Number of epochs.")
    parser.add_argument('--lr', type=float, default=lr, help="Learning rate.")
    parser.add_argument('--weight_decay', type=float, default=weight_decay, help="weight_decay.")
    parser.add_argument('--threshold', type=float, default=threshold, help='threshold for GCNJaccard')
    parser.add_argument('--gamma', type=float, default=gamma, help='gamma for RGCN')
    parser.add_argument('--save_best_model', type=str2bool, nargs='?', const=True, default=save_best_model,
                        help='Whether to save the best model.')
    parser.add_argument('--skip_previous', type=str2bool, nargs='?', const=True, default=skip_previous,
                        help='Whether to skip previously trained model in the same directory.')
    parser.add_argument('--date_time', default=date_time, help="Current date and time.")
    parser.add_argument('--seed', type=int, default=seed, help='seed')
    parser.add_argument('--gpuid', help='an integer for the accumulator', default=is_cuda)
    parser.add_argument('--idx', default="0", help='idx')
    args = parser.parse_args()

if "args" in locals():         # 如果args在当前范围局部变量中
    exp_id = args.exp_id
    data_type = args.data_type
    model_type = args.model_type
    train_fraction = args.train_fraction
    added_edge_fraction = args.added_edge_fraction
    feature_noise_ratio = args.feature_noise_ratio
    beta1 = args.beta1
    beta2 = args.beta2
    latent_size = args.latent_size
    sample_size = args.sample_size
    num_layers = args.num_layers
    reparam_mode = args.reparam_mode
    prior_mode = args.prior_mode
    struct_dropout_mode = eval_tuple(args.struct_dropout_mode)   # eval_tuple() 字符串转化为元组
    is_anneal_beta = args.is_anneal_beta
    val_use_mean = args.val_use_mean
    reparam_all_layers = eval_tuple(args.reparam_all_layers)
    epochs = args.epochs
    lr = args.lr
    weight_decay = args.weight_decay
    threshold = args.threshold
    gamma = args.gamma
    save_best_model = args.save_best_model
    skip_previous = args.skip_previous
    date_time = args.date_time
    seed = args.seed
    idx = args.idx
    is_cuda = eval(args.gpuid)
    if not isinstance(is_cuda, bool):   # 如果不是bool值
        is_cuda = "cuda:{}".format(is_cuda)

baseline = model_type in ['GCNJaccard', 'RGCN']
# Resolve the torch device: an explicit "cuda:N" string, otherwise plain
# cuda/cpu depending on the boolean value of is_cuda.
device = torch.device(is_cuda if isinstance(is_cuda, str) else "cuda" if is_cuda else "cpu")
# Directory and filename.  os.path.join replaces the original hard-coded
# Windows "\\" separators, so the paths also work on POSIX systems (on
# Windows the resulting string is unchanged).
dirname = os.path.join(GIB_PATH, "{0}_{1}".format(exp_id, date_time))
if baseline:
    filename = os.path.join(dirname, "{0}_{1}_tr_{2}_ed_{3}_{4}_beta_{5}_{6}_lat_{7}_samp_{8}_lay_{9}_anl_{10}_mean_{11}_reall_{12}_epochs_{13}_lr_{14}_l2_{15}_seed_{16}_threshold_{17}_gamma_{18}_{19}".format(
        data_type, model_type, train_fraction, added_edge_fraction, feature_noise_ratio, beta1, beta2, latent_size,
        sample_size, num_layers,
        is_anneal_beta, val_use_mean, to_string(reparam_all_layers, "-"), epochs, lr, weight_decay, seed, threshold,
        gamma, is_cuda
    ))
else:
    filename = os.path.join(dirname, "{0}_{1}_tr_{2}_ed_{3}_{4}_beta_{5}_{6}_lat_{7}_samp_{8}_lay_{9}_reparam_{10}_prior_{11}_sdrop_{12}_anl_{13}_mean_{14}_reall_{15}_epochs_{16}_lr_{17}_l2_{18}_seed_{19}_{20}".format(
        data_type, model_type, train_fraction, added_edge_fraction, feature_noise_ratio, beta1, beta2, latent_size,
        sample_size, num_layers, reparam_mode, prior_mode,
        to_string(struct_dropout_mode, "-"), is_anneal_beta, val_use_mean, to_string(reparam_all_layers, "-"), epochs,
        lr, weight_decay, seed, is_cuda
    ))

# In[3]:


# Setting the seed so numpy and torch random draws are reproducible:
np.random.seed(seed)
torch.manual_seed(seed)

# Setting default hyperparameters (-1 is the "use the model's default" sentinel):
if struct_dropout_mode[0] is None:
    struct_dropout_mode = ("None",)
if lr == -1:  # use each model's own default learning rate
    lr = None
if weight_decay == -1:  # use each model's own default weight decay
    weight_decay = None
if beta1 == -1:
    beta1 = None


def _beta_schedule(beta_final, epochs, beta_init=0):
    """Return a per-epoch beta schedule of length epochs + 1.

    The schedule stays at `beta_init` for the first quarter of training,
    then anneals towards `beta_final` over the second quarter following a
    Beta(1, 4)-shaped curve, and finally stays at `beta_final`.
    """
    init_length = int(epochs / 4)    # number of initial epochs held at beta_init
    anneal_length = int(epochs / 4)  # number of annealing epochs
    # Beta_Function(x, 1, 4) over x in [0, 1] appears to decrease from 4 to 0
    # (per the original author's note) -- TODO confirm against its definition.
    curve = Beta_Function(np.linspace(0, 1, anneal_length), 1, 4)
    # Maps the curve so the schedule goes from beta_init up to beta_final:
    ramp = curve / 4 * (beta_init - beta_final) + beta_final
    return np.concatenate([np.ones(init_length) * beta_init,
                           ramp,
                           np.ones(epochs - init_length - anneal_length + 1) * beta_final])


if beta1 is None:
    # Feature IB disabled: also disable its reparameterization and prior.
    beta1_list, reparam_mode, prior_mode = None, None, None
elif is_anneal_beta:
    beta1_list = _beta_schedule(beta1, epochs)
else:
    beta1_list = np.ones(epochs + 1) * beta1

if beta2 == -1:
    beta2_list = None
elif is_anneal_beta:
    beta2_list = _beta_schedule(beta2, epochs)
else:
    beta2_list = np.ones(epochs + 1) * beta2
# Get Dataset:
data, info = get_data(data_type,  # `data` is the graph dataset object; `info` carries metadata (num_features, num_classes, loss) used below
                      train_fraction=train_fraction,  # fraction of training labels kept for the training set
                      added_edge_fraction=added_edge_fraction,
                      feature_noise_ratio=feature_noise_ratio,
                      seed=seed,
                      )    # returns a PyTorch Geometric data object containing the specified dataset
if struct_dropout_mode[0] == 'DNsampling' or (struct_dropout_mode[0] == 'standard' and len(struct_dropout_mode) == 3):
    add_distant_neighbors(data, struct_dropout_mode[-1])   # add edge indices for the specified-hop neighbors to the data

data = process_data_for_nettack(data).to(device)  # preprocess the data for Nettack -- see the function definition (Cora is the worked example there)

data = data.to(device)  # move the data tensors onto the chosen device

# Baseline defense models (temporarily ignored in most experiments): start
if model_type == 'GCNJaccard':
    model = GCNJaccard(
        nfeat=data.features.shape[1],
        nclass=data.labels.max() + 1,
        num_layers=num_layers,
        nhid=latent_size,
        device=device,
        weight_decay=5e-4 if weight_decay is None else weight_decay,
        lr=0.01 if lr is None else lr,
    )
elif model_type == 'RGCN':
    model = RGCN(
        nnodes=data.adj.shape[0],
        nfeat=data.features.shape[1],
        nclass=data.labels.max() + 1,
        num_layers=num_layers,
        nhid=latent_size,
        device=device,
        lr=0.01 if lr is None else lr,
        gamma=0.5 if gamma is None else gamma,
        beta1=5e-4 if beta1 is None else beta1,
        beta2=5e-4 if weight_decay is None else weight_decay,
    )
# Baseline defense models: end
else:
    # GIB-GAT, GAT and GCN all share the same GNN wrapper:
    model = GNN(
        model_type=model_type,
        num_features=info["num_features"],  # number of node features (1433 for Cora)
        num_classes=info["num_classes"],    # number of classes (7 for Cora)
        normalize=True,                     # whether to normalize
        reparam_mode=reparam_mode,          # "diag": Gaussian with mean + diagonal covariance
        prior_mode=prior_mode,              # prior distribution type
        latent_size=latent_size,
        sample_size=sample_size,
        num_layers=num_layers,
        struct_dropout_mode=struct_dropout_mode,  # how the structural (graph) representation is generated
        dropout=True,
        val_use_mean=val_use_mean,
        reparam_all_layers=reparam_all_layers,
        is_cuda=is_cuda,
    )
print(model)
# In[ ]:

print(filename + "\n")

if skip_previous:
    # Determine whether a model with the current settings was already trained:
    # take the filename without its directory (os.path.basename replaces the
    # Windows-only split on "\\", with identical behavior on Windows), then
    # drop the trailing underscore-separated tokens -- presumably the seed and
    # device suffix -- and a possible "cuda" remnant, leaving the setting core.
    filename_core = "_".join(os.path.basename(filename).split("_")[:-3])
    if filename_core.endswith("cuda"):
        filename_core = filename_core[:-5]
    cand_filename = filter_filename(dirname, include=filename_core)
    if len(cand_filename) == 0:
        skip_previous = False
if skip_previous:
    print("File already exists at {} with {}".format(dirname, cand_filename))
else:   # only start a new run when this setting has not been trained before
    if baseline:
        data_record = train_baseline(model, model_type, data, device, threshold, filename, epochs,
                                     save_best_model=save_best_model, verbose=True)
    else:
        data_record = train(  # calls train() from GIB_node_model
            model=model,
            data=data,
            data_type=data_type,
            model_type=model_type,
            loss_type=info['loss'],
            beta1_list=beta1_list,
            beta2_list=beta2_list,
            epochs=epochs,
            inspect_interval=200 if isplot else 20,
            verbose=True,
            isplot=isplot,
            filename=filename,
            compute_metrics=None,
            lr=lr,
            weight_decay=weight_decay,
            save_best_model=save_best_model,
        )
