# coding: utf-8
import os
os.environ['GLOG_v'] = '3'
import argparse
import logging
import datetime
import numpy as np
import mindspore.dataset as ds
# from torch.utils.data import DataLoader
import pandas as pd
import dgl_ms
import dgl

""" 
Fix the random seed. 
However, results can still be inconsistent due to multi-thread sampling in dgl  
"""
np.random.seed(666)
dgl.random.seed(666)

from src.model.layer import *
from src.model.hetergnn import *
from src.settings import *
from src.model.layer import DotProduct
from src.utils.helper import GNNHelper, test_link_prediction
from src.utils.dataset import DataFrameWithNegDataset, DataLoader, NodeDataLoader
from src.utils.graphloading import load_knowledge_graph, load_co_relation, load_graph, get_negative_sampler, add_edges


cur_sec = datetime.datetime.now().strftime('%m%d%H%M%S_')


def parse_args() -> argparse.Namespace:
    """Parse command-line options for training/evaluating the dynamic GNN.

    Returns:
        argparse.Namespace carrying model, sampling, snapshot, device and
        I/O settings. All options have defaults except --pretrain_path,
        which defaults to None.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--model_name', type=str, default='hgt', help='Choose the base gnn')
    parser.add_argument('--suffix', type=str, default='-ed', help='To identify reverse edges')
    parser.add_argument('--num_nei', type=int, default=20, help='# of neighbours. -1 for all neighbours.')
    parser.add_argument('--num_neg', type=int, default=5, help='# of negative samples')
    parser.add_argument('--num_layer', type=int, default=2, help='# of gnn layers')
    parser.add_argument('--num_snapshot', type=int, default=3, help='# of snapshots, divided by number of edges')
    parser.add_argument('--depth', type=int, default=-1, help='The window size. -1 for all snapshots.')
    parser.add_argument('--cum', action='store_true',
                        help='Use cumulative graphs, i.e., '
                             'the current snapshot includes all the previous')
    # store_true already implies default=False; the redundant default was dropped.
    parser.add_argument('--seq', action='store_true',
                        help='Use the output of the previous snapshot as current input. '
                             'Otherwise, use feature embeddings.')
    parser.add_argument('--time_encoding', action='store_true',
                        help='Use relative temporal encoding')
    ##### Basic setting
    parser.add_argument('--optim', type=str, default='Adam')
    parser.add_argument('--batch_size', type=int, default=1024)

    parser.add_argument('--cuda', type=int, default=2)
    parser.add_argument('--data', default='ia-movielens-user2tags-10m',
                        help='[ia-movielens-user2tags-10m, ml-1m, ml-100k]')
    parser.add_argument('--log_level', type=int, default=2,
                        help='2 for verbose level, 3 for critical')
    #### Training and eval
    parser.add_argument('--mode', type=str, default='train', help='set test mode to skip the training process')
    parser.add_argument('--reload', action='store_true',
                        help='reload checkpoint')
    parser.add_argument('--pretrain_path', type=str)
    parser.add_argument('--root_path', type=str, default='/data/zhaolifan/project/DHGNN/mindspore_code/')
    parser.add_argument('--save_path', type=str, default='checkpoint/')

    return parser.parse_args()


args = parse_args()
# Run eagerly (PYNATIVE) on the selected GPU; fix MindSpore's RNG as well.
ms.set_context(device_id=args.cuda, device_target='GPU', mode=ms.PYNATIVE_MODE)
ms.set_seed(666)
# Canonical (src, etype, dst) triples to predict, plus their reverse edge types.
target_etype = table[args.data]['target']
reverse_etypes = {(s, e, t): (t, e + args.suffix, s) for s, e, t in target_etype}
params = settings
print(args)
print(params)
print(table[args.data])
save_dir = args.root_path + args.save_path + args.data + '/'
# makedirs(exist_ok=True) is idempotent — no need for an os.path.exists guard
# (the former LBYL check was redundant and race-prone).
os.makedirs(save_dir, exist_ok=True)
# 'cum_'/'seq_' become part of the checkpoint *file name* prefix, not a directory.
if args.cum: save_dir += 'cum_'
if args.seq: save_dir += 'seq_'
log_fname = args.root_path + 'log/' + args.model_name + '.log'
os.makedirs(args.root_path + 'log/', exist_ok=True)


def set_logger(s):
    """(Re)configure the module-level ``logger`` with console and file output.

    ``s`` is spliced into the log format as an extra tag. Reads the globals
    ``logger``, ``args`` and ``log_fname``; existing handlers are dropped so
    repeated calls do not duplicate output.
    """
    logger.handlers.clear()
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - ' + s + ' - %(message)s')

    # Python logging levels are multiples of 10 (DEBUG=10 .. CRITICAL=50),
    # so log_level is capped at 5 and scaled; print to the screen.
    console = logging.StreamHandler()
    console.setLevel(min(args.log_level, 5) * 10)
    console.setFormatter(fmt)

    # The file handler logs one level higher (less verbose) than the console.
    file_out = logging.FileHandler(log_fname)
    file_out.setLevel(min(args.log_level + 1, 5) * 10)
    file_out.setFormatter(fmt)

    for handler in (console, file_out):
        logger.addHandler(handler)

if __name__ == '__main__':

    # Propagate CLI switches into the shared settings dict consumed by the
    # training helper later on.
    params['reload'] = args.reload
    params['comment'] = ''
    params['optimizer'] = args.optim
    if args.pretrain_path:
        params['loadpath'] = save_dir + args.pretrain_path
    params['savepath'] = save_dir + f'snapshot{args.num_snapshot}.pkl'

    # Root logger; set_logger('') attaches the console and file handlers.
    logger = logging.getLogger()
    logger.setLevel(min(args.log_level, 5) * 10)
    log_dir = args.root_path + 'run/' + args.model_name
    set_logger('')
    logger.info(str(args))
    logger.info(str(params))
    logger.info(str(hyperparams))

    # Interactions are stored as headerless CSVs: user, item, timestamp, edge type.
    path = '../data/' + args.data + '/'
    df_train = pd.read_csv(path + 'train.csv', header=None, names=['user', 'item', 'timestamp', 'type'])
    df_valid = pd.read_csv(path + 'valid.csv', header=None, names=['user', 'item', 'timestamp', 'type'])
    df_test = pd.read_csv(path + 'test.csv', header=None, names=['user', 'item', 'timestamp', 'type'])

    # Build edges shared by every snapshot: co-occurrence relations between
    # same-typed nodes (e.g. user--co-item--user), with optional edge features.
    global_edge_dict, global_edata_dict = {}, {}
    for u, v in [('user', 'item'), ('item', 'user')]:
        if 'co-' + v in table[args.data]:
            etype = (u, 'co-' + v, u)
            global_edge_dict[etype], edatas = load_co_relation(df_train, table[args.data]['co-' + v], u, v)
            for ftype, edata in edatas.items():
                if ftype not in global_edata_dict:
                    global_edata_dict[ftype] = {}
                global_edata_dict[ftype][etype] = edata
    # Remap numeric edge-type codes to readable names when the dataset defines them.
    if 'edge_name' in table[args.data]:
        df_train['type'] = df_train['type'].apply(lambda x: table[args.data]['edge_name'][x])
        df_valid['type'] = df_valid['type'].apply(lambda x: table[args.data]['edge_name'][x])
        df_test['type'] = df_test['type'].apply(lambda x: table[args.data]['edge_name'][x])
    # t_max = df_valid['timestamp'].max()
    t_max = None
    # If the target relation's source and destination node types coincide,
    # both id columns index the same node space.
    main_ntypes = (target_etype[0][0], target_etype[0][-1])
    if main_ntypes[0] == main_ntypes[1]:
        num_nodes = {'user': max(df_train['user'].max(), df_train['item'].max()) + 1}
    else:
        num_nodes = {'user': df_train['user'].max() + 1, 'item': df_train['item'].max() + 1}
    print(num_nodes)
    """
    Load global knowledge graph
    """
    # Knowledge edges may introduce new node types / larger node-id ranges.
    if 'knowledge' in table[args.data]:
        for knowledge in table[args.data]['knowledge']:
            srctype, dsttype, srcmax, dstmax, knowledge_edge_dict = load_knowledge_graph(path, knowledge, args.data)
            num_nodes[srctype] = max(num_nodes[srctype], srcmax + 1) if srctype in num_nodes else srcmax + 1
            num_nodes[dsttype] = max(num_nodes[dsttype], dstmax + 1) if dsttype in num_nodes else dstmax + 1
            for etype, edges in knowledge_edge_dict.items():
                global_edge_dict[etype] = edges
    """
    Load node features
    """
    # Default feature is the node id itself: one int32 column per node.
    num_features = {k: [v] for k, v in num_nodes.items()}
    features = {ntype: ms.numpy.arange(num, dtype=ms.int32).expand_dims(-1) for ntype, num in num_nodes.items()}
    if 'features' in table[args.data]:
        df_features = {}  # NOTE(review): appears unused — confirm before removing.
        for ntype in table[args.data]['features']:
            features[ntype] = pd.read_csv(path + ntype + '_features.csv', header=None).values
            # Features are treated as categorical: per-column cardinality = max + 1.
            num_features[ntype] = features[ntype].max(0) + 1
            features[ntype] = ms.Tensor(features[ntype], dtype=ms.int32)
    feature_encoder = FeatureEncoder(num_features, hyperparams['emb_dim'],
                                     'mlp', hyperparams['hidden_dim'])

    # Neighbour sampler: full k-hop neighbourhood when num_nei == -1,
    # otherwise a fixed fan-out of num_nei per GNN layer.
    if args.num_nei == -1:
        neighbour_sampler = dgl.dataloading.MultiLayerFullNeighborSampler(args.num_layer)
    else:
        neighbour_sampler = dgl.dataloading.NeighborSampler([args.num_nei] * args.num_layer)
    sparsity = len(df_train) / (num_nodes['user'] * num_nodes['item']) \
        if len(num_nodes) == 2 else len(df_train) / (sum(num_nodes.values()) ** 2)
    print('Sparsity: ', 1 - sparsity)
    # NOTE(review): p appears unused below — confirm before removing.
    p = df_train.value_counts(target_etype[0][0]).quantile(0.75) / num_nodes[target_etype[0][-1]]
    negative_sampler = get_negative_sampler(args.num_neg, df_train[['user', 'item']], num_nodes)
    edge_sampler = dgl.dataloading.as_edge_prediction_sampler(
        neighbour_sampler, negative_sampler=negative_sampler,
        # exclude='reverse_types', reverse_etypes=reverse_etypes
    )
    """
    Load snapshots
    """
    # Split training edges (by row order) into num_snapshot equal slices;
    # with --cum each snapshot also contains all earlier slices.
    len_snapshot = len(df_train) // args.num_snapshot
    snapshots = []
    train_eid_dicts = []  # NOTE(review): never populated — confirm before removing.
    for i in range(args.num_snapshot):
        df = df_train.iloc[0 if args.cum else len_snapshot * i: len_snapshot * (i + 1)]
        snapshots.append(load_graph(df, num_nodes, features,
                                    global_edge_dict, global_edata_dict,
                                    t_max, suffix=args.suffix, ntypes=main_ntypes))

    """
    Load validation data and test data
    """
    # Keep only rows whose edge type is one of the target relations; pre-drawn
    # negatives come from the matching rows of valid.neg / test.neg.
    target_idx = df_valid['type'].isin([tar_etype[1] for tar_etype in target_etype])
    neg_valid = np.loadtxt(path + 'valid.neg')[target_idx]
    params['valid_data'] = ds.GeneratorDataset(DataFrameWithNegDataset(df_valid[target_idx], neg_valid),
                                               column_names=list(df_valid.columns) + ['neg'])\
        .batch(args.batch_size).create_dict_iterator()
    uids_valid = df_valid[target_idx]['user'].unique()
    iids_valid = list(set(df_valid[target_idx]['item'].unique()) | set(neg_valid.flatten().tolist()))

    target_idx = df_test['type'].isin([tar_etype[1] for tar_etype in target_etype])
    neg_test = np.loadtxt(path + 'test.neg')[target_idx]
    df_test = df_test[target_idx]
    del target_idx
    uids_test = df_test['user'].unique()
    iids_test = list(set(df_test['item'].unique()) | set(neg_test.flatten().tolist()))
    params['test_data'] = ds.GeneratorDataset(DataFrameWithNegDataset(df_test, neg_test),
                                              column_names=list(df_valid.columns) + ['neg'])\
        .batch(args.batch_size).create_dict_iterator()

    # Validation-time inference runs over the union of valid and test nodes.
    uids_valid = list(set(uids_valid) | set(uids_test))
    iids_valid = list(set(iids_valid) | set(iids_test))

    valid_nodes_dataloader = NodeDataLoader(
        snapshots[-1],
        {'user': ms.Tensor(uids_valid, dtype=ms.int32),
         'item': ms.Tensor(iids_valid, dtype=ms.int32), },
        neighbour_sampler,
        batch_size=args.batch_size * 4,
        shuffle=False,
        drop_last=False)

    """
    Initial model and modelhelper
    """
    if args.cum or args.num_snapshot == 1:
        train_snapshot = snapshots[-1]
    else:
        train_snapshot = load_graph(df_train, num_nodes, features,
                                    global_edge_dict, global_edata_dict,
                                    t_max, suffix=args.suffix, ntypes=main_ntypes)
    second_etype = [etype for etype in global_edge_dict.keys() if etype[1].startswith('co-')]
    model = HGT(train_snapshot, feature_encoder.out_dim,
                hyperparams['hidden_dim'], args.num_layer, 4,
                use_time=args.time_encoding, dropout=hyperparams['dropout'],
                embedding_layer=feature_encoder if args.num_snapshot == 1 else None)
    if args.num_snapshot > 1:
        model = AttnSeqGNN(model, snapshots,
                           hyperparams['hidden_dim'], hyperparams['hidden_dim'],
                           neighbour_sampler,
                           negative_sampler, embedding_layer=feature_encoder,
                           target_etype=target_etype,
                           ignore_etype=global_edge_dict.keys(),
                           depth=args.depth,
                           rnn_like=args.seq)
    model_helper = GNNHelper(model, DotProduct(), valid_nodes_dataloader,
                             log_dir)

    params['test_task'] = test_link_prediction


    if args.mode == 'train':
        logger.info("**********START TRAINING**********")
        start_time = datetime.datetime.now()
        # Train on the edge ids of the target relations in the training graph.
        train_eid_dict = {etype[1]: train_snapshot.edges(etype=etype, form='eid') for etype in target_etype}
        # NOTE(review): re-creates the edge_sampler already built above with the
        # same arguments — confirm whether both are needed.
        edge_sampler = dgl.dataloading.as_edge_prediction_sampler(
            neighbour_sampler, negative_sampler=negative_sampler,
            # exclude='reverse_types', reverse_etypes=reverse_etypes
        )
        params['train_data'] = DataLoader(
            train_eid_dict,
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=False)
        model.set_sampler(edge_sampler, negative_sampler)
        params['train_task'] = model_helper.fit_link_prediction
        model_helper.train_eval(**params)
        end_time = datetime.datetime.now()
        logger.info("TRAINING TIME: %s" % (end_time - start_time))

    logger.info("**********START TESTING**********")

    # Test graph: validation edges, plus the non-global edge types carried
    # over from the training graph (or last cumulative snapshot).
    valid_snapshot = load_graph(df_valid, num_nodes, features,
                                global_edge_dict, global_edata_dict,
                                t_max, suffix=args.suffix, ntypes=main_ntypes)
    etypes = set(snapshots[-1].canonical_etypes) - set(global_edge_dict.keys())
    if len(snapshots) == 1 or args.cum:
        add_edges(valid_snapshot, snapshots[-1], etypes)
    else:
        add_edges(valid_snapshot, train_snapshot, etypes)
    print(valid_snapshot)
    # Re-point the helper's inference dataloader at the test-time graph/nodes.
    model_helper.all_nodes_dataloader = NodeDataLoader(
        valid_snapshot,
        {'user': ms.Tensor(uids_test, dtype=ms.int32),
         'item': ms.Tensor(iids_test, dtype=ms.int32), },
        neighbour_sampler,
        batch_size=args.batch_size * 4,
        shuffle=False,
        drop_last=False)

    # NOTE(review): the checkpoint is loaded when mode == 'reload', but --reload
    # is a separate boolean flag and --mode's help mentions only train/test —
    # confirm the intended condition (args.reload vs args.mode).
    result = model_helper.evaluate(params['test_task'], params['test_data'],
                                   loadpath=params['savepath'] if args.mode=='reload' else None
                                   )
    logger.critical(str(args))
    logger.critical(str(hyperparams))
    logger.critical("TESTING RESULT: %s" % result)


    logger.info("CLEAR ALL")