"""
    This code is only available for HGT model.
"""
from models import *
from constants import *
import argparse
import torch
import scipy.sparse as sps
import numpy as np
import os
import pandas as pd
import json
import gc
import time
from config import *
import itertools
import networkx as nx
import dgl
from utils import *
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from collections import defaultdict

# Global plot style; `plt` is provided by one of the star imports above.
# NOTE(review): the "seaborn" style name was removed in matplotlib >= 3.6
# (renamed to "seaborn-v0_8") — confirm the pinned matplotlib version.
plt.style.use ("seaborn")

def evalue(model, loader):
    """
    Evaluate the model on a dataloader and report KS / AUC.

    Uses the module-level ``labels`` tensor and ``device`` set in ``__main__``.

    :param model: trained model; invoked as ``model(blocks, 'user')``
    :param loader: DGL node dataloader yielding (input_nodes, seeds, blocks)
    :return: tuple ``(ks, auc)`` computed on the positive-class logits
    """
    model.eval ()
    labels_list = []
    logits_list = []
    # no_grad: inference must not build autograd graphs (saves memory/time)
    with torch.no_grad ():
        for i, (input_nodes, seeds, blocks) in enumerate (loader):
            blocks = [blk.to (device) for blk in blocks]
            lbl = labels[seeds]
            logits = model (blocks, 'user')
            logits_list.extend (logits.tolist ())
            labels_list.extend (lbl.tolist ())
    logits_np = np.array (logits_list)
    labels_np = np.array (labels_list)
    # column 1 holds the positive-class score (n_out == 2)
    ks = ks_calc (logits_np[:, 1], labels_np)
    auc = auc_calc (logits_np[:, 1], labels_np)
    return ks, auc


def save_log(metrics, pic_path):
    """
    Persist the training log as CSV and the KS / AUC / loss curves as images.

    Uses module-level ``log_path``, ``params``, ``model_name`` and
    ``launch_timestamp`` for the CSV file name.

    :param metrics: dict of metric-name -> list of per-epoch values
    :param pic_path: directory (with trailing slash) for the plot images
    :return: None
    """
    # makedirs + exist_ok: os.mkdir crashed on re-runs (directory already
    # present) and when the parent directory was missing.
    os.makedirs (pic_path, exist_ok=True)
    log_data = pd.DataFrame (metrics)
    log_data.to_csv (log_path + f"{params['task']}_{model_name}_{launch_timestamp}.log")

    log_data[['train_ks_history', 'val_ks_history', 'test_ks_history']].plot ()
    plt.tight_layout ()
    plt.savefig (pic_path + "ks.png")
    plt.clf ()

    log_data[['train_auc_history', 'val_auc_history', 'test_auc_history']].plot ()
    plt.tight_layout ()
    plt.savefig (pic_path + "auc.png")
    plt.clf ()

    log_data[['epoch_loss']].plot ()
    plt.title ("Loss")
    plt.tight_layout ()
    plt.savefig (pic_path + "loss.png")
    plt.clf ()

def train(model, G, model_name, params):
    """
    Train the model, evaluating on val/test every epoch, with early stopping.

    NOTE(review): depends on module-level globals created in ``__main__``:
    ``loader`` (training dataloader — confirm it is defined before calling),
    ``labels``, ``optimizer``, ``scheduler``, ``args``, ``device``,
    ``use_cuda``, ``ntype``, ``val_loader``, ``test_loader``, ``models_path``
    and ``launch_timestamp``.

    :param model: model to train; invoked as ``model(blocks, 'user')``
    :param G: full heterogeneous graph (only touched for GPU feature moves)
    :param model_name: model type tag used in checkpoint file names
    :param params: run configuration (n_epoch, clip, ensemble_num, ...)
    :return: dict with the best epoch's metrics, ensemble KS and timestamp
    """
    metrics = defaultdict (list)
    evalue_metric = "auc"  # which metric drives best-model selection and early stop
    best_metric = {'best_epoch': 0., 'best_val_ks': 0., 'best_test_ks': 0., 'best_val_auc': 0., 'best_test_auc': 0.}

    val_ks, test_ks, val_auc, test_auc = 0, 0, 0, 0
    saved_model_path_names = []

    train_step = 0
    early_stop_count = 0

    # epochs are numbered 1..n_epoch
    for epoch in np.arange (params['n_epoch']) + 1:
        model.train ()

        round_count = 0
        labels_list = []
        logits_list = []
        epoch_loss = 0.

        for i, (input_nodes, seeds, blocks) in enumerate (loader):
            blocks = [blk.to (device) for blk in blocks]
            lbl = labels[seeds]
            if use_cuda:
                lbl = lbl.cuda ()
                # NOTE(review): both calls below return new objects and the
                # results are discarded — neither DGLGraph.to nor Tensor.cuda
                # mutates in place, so these two lines look like no-ops;
                # confirm the intent.
                G.to (device)
                G.nodes[ntype].data['inp'].cuda ()
            logits = model (blocks, 'user')
            loss = F.cross_entropy (logits, lbl)
            loss.backward ()
            torch.nn.utils.clip_grad_norm_ (model.parameters (), params['clip'])  # gradient clipping to keep updates stable
            optimizer.step ()
            optimizer.zero_grad ()

            epoch_loss += loss.item ()
            logits_list.extend (logits.tolist ())
            labels_list.extend (lbl.tolist ())
            round_count += 1
        epoch_loss /= round_count
        logits_np = np.array (logits_list)
        labels_np = np.array (labels_list)

        # column 1 = positive-class score (n_out == 2)
        train_ks = ks_calc (logits_np[:, 1], labels_np)
        train_auc = auc_calc (logits_np[:, 1], labels_np)
        train_step += 1
        scheduler.step ()
        if epoch % 1 == 0:  # always true; presumably a knob for eval frequency
            old_val_ks = val_ks
            old_val_auc = val_auc
            val_ks, val_auc = evalue (model, val_loader)
            test_ks, test_auc = evalue (model, test_loader)

            # track the best epoch according to the configured metric
            if evalue_metric == "auc":
                if best_metric['best_val_auc'] < val_auc:
                    best_metric['best_epoch'] = epoch
                    best_metric['best_val_auc'] = val_auc
                    best_metric['best_test_auc'] = test_auc
                    best_metric['best_val_ks'] = val_ks
                    best_metric['best_test_ks'] = test_ks
            else:
                if best_metric['best_val_ks'] < val_ks:
                    best_metric['best_epoch'] = epoch
                    best_metric['best_val_ks'] = val_ks
                    best_metric['best_test_ks'] = test_ks
                    best_metric['best_val_auc'] = val_auc
                    best_metric['best_test_auc'] = test_auc

            print (
                f'[metric: {evalue_metric}]'
                f'Epoch: {epoch} LR: {optimizer.param_groups[0]["lr"]:.5f} Loss {epoch_loss:.4f}, '
                f'Train ks {train_ks:.4f}, Val ks {val_ks:.4f} (Best {best_metric["best_val_ks"]:.4f}), '
                f'Test ks {test_ks:.4f} (Best {best_metric["best_test_ks"]:.4f}), '
                f'Train AUC {train_auc:.2f}, Val AUC {val_auc:.2f}(Best{best_metric["best_val_auc"]:.2f}), '
                f'Test AUC {test_auc:.2f}(Best {best_metric["best_test_auc"]:.2f})'
                f'best_epoch {best_metric["best_epoch"]}'
            )

            metrics['train_ks_history'].append (train_ks)
            metrics['val_ks_history'].append (val_ks)
            metrics['test_ks_history'].append (test_ks)
            metrics['train_auc_history'].append (train_auc)
            metrics['val_auc_history'].append (val_auc)
            metrics['test_auc_history'].append (test_auc)
            metrics['epoch_loss'].append (epoch_loss)

            # checkpoint every evaluated epoch; file name must match the one
            # save_model writes so the ensemble step can find it again
            save_model (models_path, launch_timestamp, epoch, model, val_ks, optimizer, model_name)
            saved_name = f"{models_path}m-{model_name}-{launch_timestamp}-{val_ks:.4f}.pth.tar"
            saved_model_path_names.append (saved_name)

            # early stopping: stop after `early_stop_round` consecutive epochs
            # whose validation metric moved less than `early_stop_threshold`
            if evalue_metric == "auc":
                if abs (val_auc - old_val_auc) < args.early_stop_threshold:
                    early_stop_count += 1
                    if early_stop_count >= args.early_stop_round:
                        print ("early stop")
                        break
                else:
                    early_stop_count = 0
            else:
                if abs (val_ks - old_val_ks) < args.early_stop_threshold:
                    early_stop_count += 1
                    if early_stop_count >= args.early_stop_round:
                        print ("early stop")
                        break
                else:
                    early_stop_count = 0

    save_log (metrics, f"log/log_pic/{params['model']}-{launch_timestamp}/")
    print (f"Evalue metric: {evalue_metric}, best epoch {best_metric['best_epoch']}, "
           f"best val ks({best_metric['best_val_ks']}), "
           f"best test ks({best_metric['best_test_ks']})")

    # ensemble the best few checkpoints (by validation KS) on the test set
    ensemble_ks = best_metric['best_test_ks']
    ensemble_num = params['ensemble_num']
    ensemble_model_names = None
    if ensemble_num > 1:
        order = np.argpartition (metrics['val_ks_history'], -ensemble_num)
        ensemble_model_names = [saved_model_path_names[m_id] for m_id in order[-ensemble_num:]]
        ensemble_ks = ensemble_evalue (params, ensemble_model_names, test_loader)
    best_metric.update ({'ensemble_ks': ensemble_ks, 'ensemble_model_names': ensemble_model_names,
                         'launch_timestamp': launch_timestamp})
    return best_metric


def logits_by_specific_model(model_path, params, test_loader):
    """
    Load a saved checkpoint and return its raw logits on the test set.

    Uses module-level ``model_name``, ``G`` and ``device``.

    :param model_path: path to a checkpoint written by ``save_model``
    :param params: hyper-parameter dict forwarded to ``initialize_model``
    :param test_loader: DGL node dataloader over the test nodes
    :return: np.ndarray of shape (n_samples, n_out) with the model's logits
    :raises NotImplementedError: for any model other than HGT
    """
    model, optimizer, scheduler, node_embed = initialize_model (model_name, params, G)
    load_checkpoint (model, model_path, optimizer)
    model.eval ()
    logits_list = []
    if model_name in ['HGT']:
        # no_grad: pure inference, no autograd graphs needed
        with torch.no_grad ():
            for i, (input_nodes, seeds, blocks) in enumerate (test_loader):
                blocks = [blk.to (device) for blk in blocks]
                logits = model (blocks, 'user')
                logits_list.extend (logits.tolist ())
    else:
        raise NotImplementedError
    logits_np = np.array (logits_list)
    return logits_np


def ensemble_evalue(params, ensemble_model_path_names, test_loader):
    """
    Average the logits of several saved checkpoints and report the test KS.

    Uses module-level ``labels``, ``model_name``, ``launch_timestamp`` and
    ``log_path``; also writes a JSON record of the ensemble run.

    :param params: run configuration dict (merged into the JSON log)
    :param ensemble_model_path_names: checkpoint paths to ensemble
    :param test_loader: DGL node dataloader over the test nodes
    :return: KS statistic of the averaged predictions
    """
    # collect ground-truth labels in loader order (must match logits order)
    labels_li = []
    for input_nodes, seeds, blocks in test_loader:
        labels_li.extend (labels[seeds].cpu ().numpy ().tolist ())
    labels_np = np.array (labels_li).reshape (-1).astype (int)
    ensemble_num = len (ensemble_model_path_names)
    logits_np = None
    for model_path in ensemble_model_path_names:
        logits = logits_by_specific_model (model_path, params, test_loader)
        if logits_np is None:
            logits_np = logits
        else:
            logits_np = logits_np + logits

    logits_np = logits_np / ensemble_num  # mean of the models' outputs as the score

    ks = ks_calc (logits_np[:, 1], labels_np)  # compute KS
    # persist the ensemble log record
    log_content = {
        'task': params['task'],
        'model_name': model_name,
        'launch_timestamp': launch_timestamp,
        'test_ks': ks,
        'ensemble_num': ensemble_num,
        'ensemble_model_path_names': ensemble_model_path_names
    }
    log_content.update (params)
    # 'fanout' may hold a dict with tuple keys (set in __main__), which json
    # cannot serialize; drop it. Default=None avoids KeyError when unset.
    log_content.pop ('fanout', None)
    log_name = f"{log_path}Ensemble-{model_name}-{launch_timestamp}-{ks:.4f}.json"
    with open (log_name, 'w') as f:
        f.write (json.dumps (log_content))
    print (f"ensemble 效果最好的 {ensemble_num}个模型, test ks效果为{ks}")
    return ks


def initial_sample(from_i, dst_i, positive_test_i_set, layer):
    """
    Grow the positive/test node set through `layer` passes over the edge list,
    then keep only the edges that touch the grown set.

    The set is mutated in place and also returned.

    :param from_i: edge source node ids
    :param dst_i: edge destination node ids
    :param positive_test_i_set: seed set of node ids (mutated in place)
    :param layer: number of expansion passes over the edges
    :return: (sampled_from, sampled_to, positive_test_i_set)
    """
    edges = list (zip (from_i, dst_i))
    # each pass absorbs every edge endpoint adjacent to the current set
    for _ in range (layer):
        for src, dst in edges:
            if src in positive_test_i_set or dst in positive_test_i_set:
                positive_test_i_set.add (src)
                positive_test_i_set.add (dst)
    # retain only edges with at least one endpoint in the final set
    kept = [(src, dst) for src, dst in edges
            if src in positive_test_i_set or dst in positive_test_i_set]
    sampled_from = [src for src, _ in kept]
    sampled_to = [dst for _, dst in kept]
    return sampled_from, sampled_to, positive_test_i_set


def prepare_data(params, limit_size, select_index, relation_method, positive_test_i_set):
    """
    Build one sparse adjacency matrix per relation from the similarity files.

    Loads ``{relation}_norm.npy`` for every relation in the module-level
    ``relations`` dict, restricts it to ``select_index`` rows/columns, then
    turns the strongest similarities into edges according to
    ``relation_method``.

    :param params: run configuration (max_relation_num/ratio, initial sampling)
    :param limit_size: if > 0, truncate each similarity matrix to this many rows
    :param select_index: row/column indices to keep (labelled samples)
    :param relation_method: 'max2min' | 'average' | 'probability_choice'
        NOTE(review): any other value leaves index_row/index_col as None and
        the csr_matrix construction below will raise — confirm callers.
    :param positive_test_i_set: node ids of positives + test set, or None
    :return: ``(data, uuid_sample_id)``: ``data`` maps relation name to a CSR
             adjacency matrix; ``uuid_sample_id`` remaps original ids to
             compacted ids (None unless positive_neighbor sampling is on)
    """
    max_relation_num, max_relation_ratio = params['max_relation_num'], params['max_relation_ratio']
    initial_sample_layer, initial_sample_method = params['initial_sample_layer'], params['initial_sample_method']
    data = {}

    if positive_test_i_set is not None:
        final_node = positive_test_i_set.copy ()
    else:
        final_node = None

    for relation in relations.keys ():
        if limit_size > 0:
            relation_mt_ = np.load (processed_data_path + f"{relation}_norm.npy")[:limit_size]
        else:
            relation_mt_ = np.load (processed_data_path + f"{relation}_norm.npy")
        # two-step row/column selection with explicit del + gc to keep peak
        # memory down on the large dense similarity matrices
        _ = relation_mt_[select_index, :]
        del relation_mt_
        gc.collect ()
        relation_mt = _[:, select_index]
        del _
        gc.collect ()

        row, col = relation_mt.shape
        index_row, index_col = None, None

        # ratio overrides the absolute edge count when both are given
        if max_relation_ratio is not None:
            max_relation_num = int (col * max_relation_ratio)

        if relation_method == "max2min":
            # mask upper triangle incl. diagonal so each pair is counted once
            min_index = np.triu_indices (row, 0, col)
            relation_mt[min_index] = -1.  # -1 is the minimum similarity

            # keep the globally top-`limit` similarities as edges
            limit = max_relation_num
            order = np.argpartition (relation_mt.ravel (), -limit)
            index = np.apply_along_axis (lambda x: (x // col, x % col), 0, order[-limit:])
            index_row, index_col = index[0].tolist (), index[1].tolist ()

            print_hist (relation_mt.ravel ()[order[-limit:]], title_name=relation, bins=20, save_path="log/",
                        show=False)

        elif relation_method == "average":
            min_index = np.diag_indices (row)
            relation_mt[min_index] = -1.  # -1 is the minimum similarity

            # NOTE(review): here the *ratio itself* is used as a fixed
            # per-node neighbour count (not a fraction of the node count) —
            # matches the CLI help text, but confirm it is intended.
            limit = int (max_relation_ratio)
            index_rank = np.apply_along_axis (lambda x: np.argpartition (x, -limit)[-limit:], 0, relation_mt).T
            index_col = index_rank.ravel ().tolist ()
            index_row = np.repeat (np.arange (0, row), limit).tolist ()

        elif relation_method == "probability_choice":
            def sigmoid(x):
                return 1/(1+np.exp(-x))
            limit = max_relation_num
            # take the upper triangle (each pair once, diagonal excluded)
            triu_indices = np.triu_indices (row, 1, col)
            sim_tri = relation_mt[triu_indices].ravel ()
            # squash similarities to (0, 1) with a sigmoid, then normalize
            # into a probability distribution over candidate edges
            prob_tri = sigmoid(sim_tri)
            prob_tri = prob_tri / sum (prob_tri)
            tri_index = np.arange (0, len (prob_tri))
            # sample edges without replacement according to the probabilities
            choice_index = np.random.choice (tri_index, size=limit, replace=False, p=prob_tri)
            # build the sparse matrix indices
            index_row = triu_indices[0][choice_index]
            index_col = triu_indices[1][choice_index]

            print_hist (sim_tri[choice_index], title_name=relation, bins=20, save_path="log/main_png/", show=False)

        if initial_sample_method == "positive_neighbor":
            # keep only edges reachable from positives/test nodes and grow
            # the node set accordingly (see initial_sample)
            index_row, index_col, positive_test_i_set_ = initial_sample (index_row, index_col,
                                                                         positive_test_i_set.copy (),
                                                                         initial_sample_layer)
            final_node.update (positive_test_i_set_)

        data[relation] = sps.csr_matrix ((([1] * len (index_col)), (index_row, index_col)), shape=(col, col),
                                         dtype=np.int8)

    uuid_sample_id = None
    if initial_sample_method == "positive_neighbor":
        # compact the kept node ids into a dense 0..n-1 range
        final_node = list (final_node)
        final_node.sort ()
        uuid_sample_id = {uuid: sample_id for sample_id, uuid in enumerate (final_node)}
        new_col = len (uuid_sample_id)
        for relation, csr in data.items ():
            csr.resize ((new_col, new_col))
        # NOTE(review): `col` here is left over from the *last* relation's
        # matrix width — fine if all relations share one width; confirm.
        print (f"正例采样前用户数量为{col},采样后用户数量为:{new_col}")
    return data, uuid_sample_id


def save_model(checkpoint_path, launchTimestamp, epochID, model, val_ks, optimizer, model_name):
    """
    Serialize a training checkpoint (model + optimizer state) to disk.

    The file name encodes model name, launch timestamp and validation KS.

    :param checkpoint_path: directory (with trailing slash) for checkpoints
    :param launchTimestamp: run identifier used in the file name
    :param epochID: epoch index; stored as ``epochID + 1``
    :param model: model whose ``state_dict`` is saved
    :param val_ks: validation KS; stored under the 'best_loss' key
    :param optimizer: optimizer whose ``state_dict`` is saved
    :param model_name: model type tag used in the file name
    :return: None
    """
    target = f"{checkpoint_path}m-{model_name}-{launchTimestamp}-{val_ks:.4f}.pth.tar"
    checkpoint = {
        'epoch': epochID + 1,
        'state_dict': model.state_dict (),
        'best_loss': val_ks,
        'optimizer': optimizer.state_dict (),
    }
    torch.save (checkpoint, target)


def load_checkpoint(model, checkpoint_PATH, optimizer):
    """
    Restore model and optimizer state from a checkpoint file.

    :param model: model to load 'state_dict' into (modified in place)
    :param checkpoint_PATH: path to a checkpoint written by ``save_model``
    :param optimizer: optimizer to load 'optimizer' state into (in place)
    :return: the (model, optimizer) pair, for convenience
    """
    # map_location='cpu' lets CUDA-saved checkpoints load on CPU-only hosts;
    # load_state_dict then copies tensors onto the model's own device.
    model_CKPT = torch.load (checkpoint_PATH, map_location='cpu')
    model.load_state_dict (model_CKPT['state_dict'])
    optimizer.load_state_dict (model_CKPT['optimizer'])
    return model, optimizer


def get_dataset_id(dataset, id_uuid=None, uuid_sample_id=None, id_name='客户ID号'):
    """
    Convert a DataFrame's id column into a LongTensor of graph node ids.

    Ids missing from the mapping(s) are dropped. If ``uuid_sample_id`` is
    given, the node ids were re-encoded to compacted sample ids, so the
    result is the twice-mapped id; otherwise the direct ``id_uuid`` lookup.

    :param dataset: DataFrame containing the id column
    :param id_uuid: dict mapping raw id value -> graph node id
    :param uuid_sample_id: optional dict mapping node id -> compacted sample id
    :param id_name: name of the id column in ``dataset``
    :return: LongTensor of mapped ids
    """
    if uuid_sample_id is None:
        mapped = dataset[id_name].map (lambda x: id_uuid.get (x))
    else:
        mapped = dataset[id_name].map (lambda x: uuid_sample_id.get (id_uuid.get (x)))
    # renamed from `id`, which shadowed the builtin
    ids = mapped.dropna ().astype (int)
    return torch.tensor (ids.tolist ()).long ()


def prepare_train_val_test_index(params, exp_id=None, exp_train_id=None, uuid_sample_id=None, labels_df=None):
    """
    Load train/val/test sample ids for an experiment split from disk.

    Reads ``data_sampled_id/{task}/split_{exp_id}/``; for the training split,
    optionally balances classes by down-sampling negatives to the number of
    positives (gated by the module-level ``if_negtive_sample`` flag).
    Also uses the module-level ``processed_data_path`` and ``id_uuid``.

    :param params: run configuration; only 'task' is read
    :param exp_id: experiment split folder number
    :param exp_train_id: training-set file number inside the split
    :param uuid_sample_id: optional remap of node ids (positive_neighbor mode)
    :param labels_df: pandas Series of labels indexed by node id; enables
                      negative down-sampling when given
    :return: dict with 'train'/'val'/'test' -> LongTensor of node ids
    :raises NotImplementedError: if the split folder does not exist
    """
    task = "fraud" if params['task'] == "fraud" else "credit"
    dataset_index = {}
    if os.path.exists (processed_data_path + f"data_sampled_id/{task}/split_{exp_id}/"):
        exp_path = f"{processed_data_path}data_sampled_id/{task}/split_{exp_id}/"
        id_name = "id"
        for data_type in ['train', 'val', 'test']:
            if data_type == "train":
                dataset = pd.read_csv (exp_path + f"sample/{data_type}_{exp_train_id}.csv",dtype={'id':str})
                if labels_df is not None and if_negtive_sample:
                    id_label = dataset.copy ()
                    # NOTE(review): the trailing .dropna() here has no effect —
                    # pandas realigns the assigned Series on the index, so the
                    # dropped rows come back as NaN; the in-place dropna below
                    # is what actually removes unmapped ids. Confirm intent.
                    id_label['label'] = id_label[id_name].map (lambda x: labels_df[int(id_uuid.get (x))]if id_uuid.get (x) is not None else None).dropna()
                    id_label.dropna (inplace=True)
                    positive = id_label[id_label.label > 0]
                    # after dropping positives, sample as many negatives as
                    # there are positives (1:1 class balance), then shuffle
                    id_label.drop (index=positive.index, axis=0, inplace=True)
                    negtive = id_label.sample (len (positive), random_state=1)
                    dataset = pd.concat ([positive, negtive], axis=0).sample (frac=1).reset_index (drop=True)
            else:
                dataset = pd.read_csv (exp_path + f"{data_type}.csv")
            dataset_index[data_type] = get_dataset_id (dataset, id_uuid, uuid_sample_id, id_name)
    else:
        print ("id文件夹不存在")
        raise NotImplementedError
    return dataset_index


def prepare_positie_and_test_i(params, labels):
    """
    Collect the node indices of positive samples plus all test-set nodes.

    Used to under-sample before graph construction; not recommended.
    Uses the module-level ``processed_data_path`` and ``id_uuid``.

    :param params: run configuration; only 'task' is read
    :param labels: pandas Series of labels indexed by node id
    :return: set of node indices (positives ∪ test nodes)
    """
    task = params['task']
    default_test_path = processed_data_path + "test.csv"

    # prefer a top-level test.csv, fall back to the per-task sampled one
    if os.path.exists (default_test_path):
        test = pd.read_csv (default_test_path)
        id_name = "id"
    else:
        test = pd.read_csv (processed_data_path + f"data_sampled/{task}/test.csv")
        id_name = '客户ID号'

    positive_test_i = set (labels[labels == 1].index.tolist ())
    test_i = test[id_name].map (lambda x: id_uuid.get (x)).dropna ().astype (int).tolist ()
    positive_test_i.update (test_i)
    return positive_test_i


def initialize_model(model_name, params, G):
    """
    Build the requested model plus its optimizer / LR scheduler.

    Uses module-level globals: ``device``, ``use_cuda`` and, for HGT, the
    ``node_dict`` / ``edge_dict`` built in ``__main__``.

    :param model_name: 'HGT' | 'RGCN' | 'HAN'; any other value returns all Nones
    :param params: hyper-parameter dict (n_inp/n_hid/n_out/n_layers/...)
    :param G: the heterogeneous graph the model is built over
    :return: tuple ``(model, optimizer, scheduler, node_embed)``;
             ``node_embed`` is only non-None for RGCN
    """
    optimizer, scheduler, node_embed, model = None, None, None, None
    if model_name == "HGT":
        model = HGT (G,
                     node_dict, edge_dict,
                     n_inp=params['n_inp'],
                     n_hid=params['n_hid'],
                     n_out=params['n_out'],
                     n_layers=params['n_layers'],
                     n_heads=params['n_heads'],
                     use_norm=True,
                     device=device).to (device)
        optimizer = torch.optim.AdamW (model.parameters ())
        scheduler = torch.optim.lr_scheduler.OneCycleLR (optimizer, total_steps=params['n_epoch'],
                                                         max_lr=params['max_lr'])
    elif model_name == "RGCN":
        # create model
        # NOTE(review): params must already contain 'n_bases', 'dropout' and
        # 'use_self_loop' — get_args does not define them, so this branch
        # would raise KeyError as wired today; confirm the caller.
        model = EntityClassify (G,
                                params['n_hid'],
                                params['n_out'],
                                num_bases=params['n_bases'],
                                num_hidden_layers=params['n_layers'] - 2,
                                dropout=params['dropout'],
                                use_self_loop=params['use_self_loop']).to (device)
        # create embeddings
        embed_layer = RelGraphEmbed (G, params['n_hid'])
        if use_cuda:
            # labels = labels.to (device)
            embed_layer = embed_layer.to (device)
        node_embed = embed_layer ()
        # optimizer over both the model and the learnable node embeddings
        all_params = itertools.chain (model.parameters (), embed_layer.parameters ())
        optimizer = torch.optim.AdamW (all_params)
        scheduler = torch.optim.lr_scheduler.OneCycleLR (optimizer, total_steps=params['n_epoch'],
                                                         max_lr=params['max_lr'])
    elif model_name == "HAN":
        # NOTE(review): this overwrites 'n_heads' with a *list* and adds
        # 'lr'/'num_epochs'/'patience' that nothing below reads (the
        # scheduler still uses 'n_epoch' and AdamW its default lr) —
        # confirm these HAN defaults are actually wired up.
        params.update ({
            'lr': 0.005,  # Learning rate
            'n_heads': [8],  # Number of attention heads for node-level attention
            'hidden_units': 8,
            'dropout': 0.6,
            'weight_decay': 0.001,
            'num_epochs': 150,
            'patience': 100
        })

        model = HAN (meta_paths=[['face_sim', 'eyes_sim'], ['eyes_sim', 'nose_and_mouth_sim'],
                                 ['other_sim', 'graph_sim'], ['graph_sim', 'other_sim'],
                                 ['nose_and_mouth_sim', 'eyes_sim'], ['eyes_sim', 'face_sim']],
                     in_size=params['n_inp'],
                     hidden_size=params['n_hid'],
                     out_size=params['n_out'],
                     num_heads=params['n_heads'],
                     dropout=params['dropout']).to (device)

        optimizer = torch.optim.AdamW (model.parameters ())
        scheduler = torch.optim.lr_scheduler.OneCycleLR (optimizer, total_steps=params['n_epoch'],
                                                         max_lr=params['max_lr'])
    return model, optimizer, scheduler, node_embed


def prepare_directory():
    """
    Ensure every directory the run writes to exists.

    Iterates the module-level ``TO_ENSURE_PATH`` list from the config.

    :return: None
    """
    for path in TO_ENSURE_PATH:
        # makedirs + exist_ok also creates missing parent directories and
        # avoids the exists-then-mkdir race; os.mkdir failed on nested paths.
        os.makedirs (path, exist_ok=True)


def get_args():
    """
    Build and parse the command-line arguments for a training run.

    Unknown arguments are ignored (``parse_known_args``) so the script can
    run under wrappers that inject extra flags.

    :return: args (Namespace) parsed arguments
    """
    parser = argparse.ArgumentParser (description='Training GNN')
    parser.add_argument ('--n_epoch', type=int, default=50)
    parser.add_argument ('--n_hid', type=int, default=256, help="隐藏线性层参数个数")
    parser.add_argument ('--n_inp', type=int, default=256, help="输入的特征参数个数，如果比原有特征数量大，则随机补充")
    # type=float: the clip norm is fractional; type=int crashed on e.g. "0.5"
    parser.add_argument ('--clip', type=float, default=1.0)
    parser.add_argument ('--max_lr', type=float, default=1e-3)
    parser.add_argument ('--task', type=str, default="fraud")
    parser.add_argument ('--limit_size', type=int, default=-1)
    parser.add_argument ('--threshold', type=float, default=0.5)
    parser.add_argument ('--device_type', type=str, default="cpu", help="cuda or cpu, default cpu")
    parser.add_argument ('--model', type=str, default="HGT", help="RGCN,HAN, HGT")
    parser.add_argument ('--n_heads', type=int, default=4, help="HGT HAN parameter")
    parser.add_argument ('--n_layers', type=int, default=2, help="模型层数")
    parser.add_argument ('--sample_layers', type=int, default=2, help="multi-layers采样的层数，用以mini-batch")
    parser.add_argument ('--feature_type', type=str, default="source",
                         help="'source' or 'source_norm' or 'embedding' or 'source_embedding' or "
                              "'source_norm_embedding'")
    parser.add_argument ('--initial_sample_method', type=str, default="full",
                         help="'positive_neighbor' or 'full'")
    parser.add_argument ('--initial_sample_layer', type=int, default=1, help="正例抽样层数")
    parser.add_argument ('--source_type', type=str, default="norm", help="数据预处理类型，norm则使用z-score归一化，normal则不作处理")
    parser.add_argument ('--num_workers', type=int, default=2, help="加载数据的线程数量")
    parser.add_argument ('--batch_size', type=int, default=1000, help="batch size of sample node")
    parser.add_argument ('--fanout', type=str, default="full",
                         help="sample node fanout. eg: 150,150,150,150,150. 设置为full则会自动取节点的所有邻居(理论参考GraphSage)")
    parser.add_argument ('--fanout_rate', type=float, default=0.001,
                         help="(暂时废弃)")
    parser.add_argument ('--max_relation_num', type=int, default=50000,
                         help="最多取多少条关系，如果max_relation_ratio设置了，该数字会被忽略")
    parser.add_argument ('--max_relation_ratio', type=float, default=None,
                         help="最多有多少倍于user数量的关系被纳入网络，如果设置了该数字，max_relation_num会被忽略")
    parser.add_argument ('--plot', type=str, default="False",
                         help="是否plot以展示图")
    parser.add_argument ('--relation_method', type=str, default="average",
                         help="生成关系的方法。"
                              "average：每个节点固定有int(max_relation_ratio)个关系，只有max_relation_ratio起作用；"
                              "max2min: 相关性从大到小建立关系边，max_relation_ratio和max_relation_num都起作用"
                              "probability_choice: 将相关性化作可能性对边进行抽样")
    parser.add_argument ('--data_path', type=str, default=None,
                         help="源数据文件夹，config中的变量，可以覆盖指定，默认None则按照config处理")
    parser.add_argument ('--source_file_name', type=str, default=None,
                         help="源数据文件名，config中的变量，可以覆盖指定，默认None则按照config处理")
    parser.add_argument ('--processed_data_path', type=str, default=None,
                         help="目标数据文件夹，config中的变量，可以覆盖指定，默认None则按照config处理")
    parser.add_argument ('--ensemble_num', type=int, default=1, help="集成多少个效果最好的模型，如果为1则不集成")
    parser.add_argument ('--hyperopt', type=str, default=None, help="设置为bayes则会自动寻找最优的参数，不设置则不启用调参")
    parser.add_argument ('--opt_iter', type=int, default=2, help="贝叶斯调参多少轮")
    parser.add_argument ('--early_stop_round', type=int, default=10, help="多少轮指标变化小于阈值则早停")
    parser.add_argument ('--early_stop_threshold', type=float, default=0.0001, help="早停的阈值")
    parser.add_argument ('--exp_id', type=int, default=1, help="设置执行的试验源文件夹编号")
    parser.add_argument ('--exp_train_id', type=int, default=1, help="设置执行的试验源文件夹中训练集编号")
    parser.add_argument ('--graph_refresh', type=str, default="no", help="如果设置为force则每次都重新从原数据构建全图，如果为no则从cache中构建")
    args, unknown = parser.parse_known_args ()
    return args


if __name__ == '__main__':
    torch.manual_seed (0)
    args = get_args ()
    prepare_directory ()

    if_negtive_sample = True

    # ------------准备全局属性----------
    if args.data_path is not None:
        data_path = args.data_path
    if args.source_file_name is not None:
        source_file_name = args.source_file_name
    if args.processed_data_path is not None:
        processed_data_path = args.processed_data_path
    fanout_rate = float (args.fanout_rate)
    if_plot = True if args.plot == "True" else False
    launch_timestamp = int (time.time ())
    limit_size = int (args.limit_size)
    model_name = args.model
    use_cuda = True if args.device_type == "cuda" else False
    if use_cuda:
        if torch.cuda.is_available ():
            device = torch.device ("cuda")
        else:
            print ("cuda not available")
            device = torch.device ("cpu")
    else:
        device = torch.device ("cpu")

    params = {
        "model": args.model,
        "task": args.task,
        "n_hid": int (args.n_hid),
        "n_inp": int (args.n_inp),
        "n_epoch": int (args.n_epoch),
        "max_lr": args.max_lr,
        "clip": args.clip,
        "batch_size": int (args.batch_size),
        "n_layers": int (args.n_layers),
        "num_workers": int (args.num_workers),
        "initial_sample_method": args.initial_sample_method,
        "initial_sample_layer": int (args.initial_sample_layer),
        'max_relation_num': int (args.max_relation_num),
        "max_relation_ratio": float (args.max_relation_ratio) if args.max_relation_ratio is not None else None,
        'n_out': 2,
        'ensemble_num': args.ensemble_num,
        'relation_method': args.relation_method,
        'sample_layers': int (args.sample_layers)
    }

    # 准备路径
    log_path = log_path + f"{params['task']}/"
    models_path = models_path + f"{params['task']}/"
    if not os.path.exists (log_path):
        os.mkdir (log_path)
    if not os.path.exists (models_path):
        os.mkdir (models_path)

    # 准备id映射
    with open (processed_data_path + "uuid_id.json", "r") as f:
        uuid_id = json.loads (f.read ())
    id_uuid = {v: int (k) for k, v in uuid_id.items ()}

    # 准备数据
    all_data = pd.read_csv (processed_data_path + f"all_data_{args.source_type}.csv")

    if params['task'] == "fraud":
        labels = all_data['反欺诈分类'].fillna (0)
    else:
        labels = all_data['信用分类'].fillna (0)
    if limit_size > 0:
        all_data = all_data[:limit_size]
        labels = labels[:limit_size]

    labels_df = labels.copy ()

    positive_test_i_set = None
    if params['initial_sample_method'] == "positive_neighbor":
        positive_test_i_set = prepare_positie_and_test_i (params, labels)

    select_index = labels.index
    labels = torch.tensor (labels.to_numpy (), requires_grad=False).long ()

    uuid_sample_id = None

    if args.graph_refresh == "no" and os.path.exists ("cache/graph.bin"):
        G = dgl.load_graphs ("cache/graph.bin")[0][0]
    else:
        data, uuid_sample_id = prepare_data (params, limit_size, select_index, args.relation_method,
                                             positive_test_i_set)
        if uuid_sample_id is not None:
            key = uuid_sample_id.keys ()
            all_data = all_data.loc[key, :]
        # 准备异构图
        G = dgl.heterograph ({
            ('user', 'face_sim', 'user'): data['face_sim'].nonzero (),
            ('user', 'face_sim_by', 'user'): data['face_sim'].transpose ().nonzero (),
            ('user', 'eyes_sim', 'user'): data['eyes_sim'].nonzero (),
            ('user', 'eyes_sim_by', 'user'): data['eyes_sim'].transpose ().nonzero (),
            ('user', 'nose_and_mouth_sim', 'user'): data['nose_and_mouth_sim'].nonzero (),
            ('user', 'nose_and_mouth_sim_by', 'user'): data['nose_and_mouth_sim'].transpose ().nonzero (),
            ('user', 'other_sim', 'user'): data['other_sim'].nonzero (),
            ('user', 'other_sim_by', 'user'): data['other_sim'].transpose ().nonzero (),
            ('user', 'graph_sim', 'user'): data['graph_sim'].nonzero (),
            ('user', 'graph_sim_by', 'user'): data['graph_sim'].transpose ().nonzero (),
        })
        print (G)
        dgl.save_graphs ("cache/graph.bin", [G])
    # embedding
    node_dict = {}
    edge_dict = {}
    for ntype in G.ntypes:
        node_dict[ntype] = len (node_dict)
    for etype in G.etypes:
        edge_dict[etype] = len (edge_dict)
        G.edges[etype].data['id'] = torch.ones (G.number_of_edges (etype), dtype=torch.long) * edge_dict[etype]
    # Initialize per-node-type input features ('inp') according to --feature_type.
    # NOTE(review): every branch assigns the SAME feature matrix to every ntype;
    # this only makes sense when the graph has a single node type ('user') — confirm.
    def _zscore (frame):
        """Z-score normalize every column of *frame* in place, except the
        categorical 'face_visual_gender' column; return the numpy matrix.
        NOTE(review): a constant column (std == 0) produces inf/NaN here —
        confirm upstream data never contains one."""
        for feature, col in frame.items ():
            if feature in ['face_visual_gender']:
                continue
            col_mean = col.mean ()
            col_std = col.std ()
            frame[feature] = col.map (lambda x: (x - col_mean) / col_std)
        return frame.to_numpy ()

    def _pad_to_width (source):
        """Return a Parameter of width params['n_inp']: pad *source* with
        xavier-initialized random columns when it is narrower, otherwise
        widen params['n_inp'] to match the source width."""
        if params['n_inp'] <= source.shape[1]:
            params['n_inp'] = source.shape[1]
            return nn.Parameter (torch.from_numpy (source)).float ()
        other = torch.Tensor (G.number_of_nodes (ntype), params['n_inp'] - source.shape[1])
        nn.init.xavier_uniform_ (other)
        return nn.Parameter (torch.cat ([torch.from_numpy (source), other], dim=1)).float ()

    for ntype in G.ntypes:
        if args.feature_type == "source":
            # Raw tabular features (columns 6+), frozen (no gradient).
            source = all_data.iloc[:, 6:].to_numpy ()
            G.nodes[ntype].data['inp'] = nn.Parameter (torch.from_numpy (source), requires_grad=False).float ()
            params['n_inp'] = source.shape[1]
        elif args.feature_type == "source_norm":
            # Z-score normalized tabular features, frozen.
            # .copy() keeps the in-place normalization from writing back into
            # all_data (and from double-normalizing on a second ntype pass).
            source = _zscore (all_data.iloc[:, 6:].copy ())
            G.nodes[ntype].data['inp'] = nn.Parameter (torch.from_numpy (source), requires_grad=False).float ()
            params['n_inp'] = source.shape[1]
        elif args.feature_type == "embedding":
            # Random xavier embedding of width n_inp, frozen.
            emb = nn.Parameter (torch.Tensor (G.number_of_nodes (ntype), params['n_inp']),
                                requires_grad=False).float ()
            nn.init.xavier_uniform_ (emb)
            G.nodes[ntype].data['inp'] = emb
        elif args.feature_type == "source_embedding":
            # Raw features padded with random columns up to n_inp (trainable,
            # as in the original: default requires_grad).
            G.nodes[ntype].data['inp'] = _pad_to_width (all_data.iloc[:, 6:].to_numpy ())
        else:
            # Default: normalized features padded with random columns.
            # BUG FIX: the original did `source = source.to_numpy` (missing
            # parentheses), leaving a bound method and crashing on
            # `source.shape[1]` on the next line.
            G.nodes[ntype].data['inp'] = _pad_to_width (_zscore (all_data.iloc[:, 6:].copy ()))

    # Optionally render the graph (homogenized, undirected) for visual inspection.
    if if_plot:
        homo_graph = dgl.to_homogeneous (G)
        undirected_graph = homo_graph.to_networkx ().to_undirected ()
        plt.figure (1, figsize=(40, 40))
        nx.draw (undirected_graph, with_labels=False, node_color=[[.7, .7, .7]])
        plt.show ()

    # Configure the per-relation neighbor fan-out.
    # "full" means sample every neighbor; otherwise args.fanout is a
    # comma-separated integer per relation family, applied to both the
    # forward edge type and its "_by" reverse edge type.
    if args.fanout == "full":
        params['fanout'] = None
    else:
        per_relation = [int (n) for n in args.fanout.split (",")]
        relation_families = ["face_sim", "eyes_sim", "nose_and_mouth_sim",
                             "other_sim", "graph_sim"]
        fanout = {}
        for idx, family in enumerate (relation_families):
            fanout[('user', family, 'user')] = per_relation[idx]
            fanout[('user', family + "_by", 'user')] = per_relation[idx]
        params["fanout"] = fanout

    # Build the train / validation / test node-index splits.
    # NOTE(review): prepare_train_val_test_index comes in via a star import
    # (presumably utils); assumed to return index arrays keyed by split name.
    dataset_index = prepare_train_val_test_index (params, args.exp_id, args.exp_train_id, uuid_sample_id, labels_df)
    train_idx = dataset_index['train']
    val_idx = dataset_index['val']
    test_idx = dataset_index['test']
    # Optional debug cap: keep only node ids below limit_size in every split
    # (limit_size <= 0 disables the cap).
    if limit_size > 0:
        train_idx = train_idx[train_idx < limit_size]
        val_idx = val_idx[val_idx < limit_size]
        test_idx = test_idx[test_idx < limit_size]
    # train sampler: fanout None -> full neighborhood at every layer,
    # otherwise the same per-relation fan-out dict is reused per layer.
    if params['fanout'] is None:
        sampler = dgl.dataloading.MultiLayerNeighborSampler ([None] * params['sample_layers'])
    else:
        sampler = dgl.dataloading.MultiLayerNeighborSampler ([params["fanout"]] * params['sample_layers'])
    loader = dgl.dataloading.NodeDataLoader (
        G, {"user": train_idx}, sampler,
        batch_size=params['batch_size'], shuffle=False, num_workers=0)
    # validation sampler
    # NOTE(review): val/test samplers skip the None check done for the train
    # sampler; [None] * layers happens to mean "full" as well, so behavior
    # matches, but the asymmetry looks accidental.
    val_sampler = dgl.dataloading.MultiLayerNeighborSampler ([params["fanout"]] * params['sample_layers'])
    val_loader = dgl.dataloading.NodeDataLoader (
        G, {"user": val_idx}, val_sampler,
        batch_size=params['batch_size'], shuffle=False, num_workers=0)
    # test sampler mirrors the validation sampler.
    test_sampler = dgl.dataloading.MultiLayerNeighborSampler ([params["fanout"]] * params['sample_layers'])
    test_loader = dgl.dataloading.NodeDataLoader (
        G, {"user": test_idx}, test_sampler,
        batch_size=params['batch_size'], shuffle=False, num_workers=0)

    # Model-specific hyper-parameters: HGT additionally needs an
    # attention-head count, taken from the command line.
    if model_name == "HGT":
        params.update (n_heads=int (args.n_heads))

    # Model construction and training — either a single run, or a Bayesian
    # hyper-parameter search via hyperopt when --hyperopt is set.
    if args.hyperopt is None:
        model, optimizer, scheduler, node_embed = initialize_model (model_name, params, G)
        train (model, G, model_name, params)
    elif args.hyperopt in ['bayes', 'Bayes', 'BAYES']:
        def experiment(params):
            """Run one hyperopt trial: rebuild the model with the sampled
            params, train it, and return the negated ensemble KS as the loss
            hyperopt minimizes.  Mutates globals so that code outside this
            closure (e.g. log paths) sees the fresh model and timestamp."""
            global model, optimizer, scheduler, node_embed, launch_timestamp
            launch_timestamp = int (time.time ())
            model, optimizer, scheduler, node_embed = initialize_model (model_name, params, G)
            result = train (model, G, model_name, params)
            return {'loss': -result['ensemble_ks'], 'status': STATUS_OK,
                    'ensemble_model_names': result['ensemble_model_names']}


        # Discrete options; hp.choice yields an INDEX into these lists, which
        # is mapped back to the concrete value after fmin (loop below).
        choice_list = {
            'n_heads': [4, 8],
            'n_hid': [128, 256, 512],
            'max_relation_ratio': range (5, 21, 1),
            'ensemble_num': [3, 5]
        }

        space = {
            'n_heads': hp.choice ('n_heads', choice_list['n_heads']),
            'max_lr': hp.uniform ('max_lr', 0.001, 0.01),
            'n_hid': hp.choice ('n_hid', choice_list['n_hid']),
            'max_relation_ratio': hp.choice ('max_relation_ratio', choice_list['max_relation_ratio']),
            'ensemble_num': hp.choice ('ensemble_num', choice_list['ensemble_num']),
            'clip': hp.uniform ('clip', 1, 5),
        }
        # The search space is merged into params, so fmin samples the hp.*
        # entries and passes the fully-populated dict to experiment().
        params.update (space)
        trials = Trials ()
        best = fmin (experiment, params, algo=tpe.suggest, max_evals=args.opt_iter, trials=trials)
        # Translate hp.choice indices back into their concrete values.
        for k, v in choice_list.items ():
            best[k] = v[best[k]]

        # hyperopt minimizes, so the stored loss is -ensemble_ks; negate back.
        best_ensemble_loss = - trials.best_trial['result']['loss']
        best_ensemble_model_names = trials.best_trial['result']['ensemble_model_names']
        print (f'最佳的参数配置为{best},best_ensemble_loss为{best_ensemble_loss},最佳集成的模型:{best_ensemble_model_names}')

        # Persist the winning configuration to a timestamped JSON file.
        params['ensemble_model_names'] = best_ensemble_model_names
        params['best_ensemble_loss'] = best_ensemble_loss
        params.update (best)
        # 'fanout' is dropped before json.dumps — presumably because its
        # tuple keys (set earlier from the relation triples) are not
        # JSON-serializable.
        params.pop ('fanout')
        with open (f'log/opt/opt-{launch_timestamp}-{best_ensemble_loss:.4f}-params.json', 'w') as f:
            f.write (json.dumps (params))

        # Append a summary row for this run to the shared CSV log.
        # NOTE(review): pd.DataFrame over a dict of mostly scalars relies on
        # 'ensemble_model_names' being list-like to supply an index — confirm.
        params['split_id'] = args.exp_id
        params['sample_id'] = args.exp_train_id
        params_df = pd.DataFrame(params)
        params_df.to_csv("log/opt_summary.csv",mode='a',header=False,index=False)
