import random
import logging

import torch
import numpy as np
import pandas as pd

from time import time
from prettytable import PrettyTable

from utils.logger import logging_setting
from utils.parser import parse_args
from utils.data_loader import load_data

from modules.KGIN_RI import KGIN_RI

from utils.evaluate import test
from utils.helper import early_stopping

from collections import defaultdict

# Dataset-size globals. Populated in the __main__ block after load_data();
# get_feed_dict() reads n_items / n_recall_items for negative sampling.
n_users = 0
n_items = 0
n_entities = 0
n_nodes = 0
n_relations = 0
n_recall_items = 0

# Build one training mini-batch: users, positive items and sampled negatives.
def get_feed_dict(train_entity_pairs, start, end, train_user_set, recall_items, user_aor_dict, aor_spu_dict):
    """Slice ``train_entity_pairs[start:end]`` and attach negative samples.

    Args:
        train_entity_pairs: LongTensor of (user, item) interaction rows.
        start, end: batch boundaries into ``train_entity_pairs``.
        train_user_set: user id -> items the user has purchased (positives).
        recall_items: candidate item ids for the 'recall' strategy.
        user_aor_dict: user id -> list of AOR ids the user purchased in.
        aor_spu_dict: AOR id -> list of item ids belonging to that AOR.

    Returns:
        dict with 'users', 'pos_items' and 'neg_items' tensors on ``device``.

    Reads module globals ``n_items``, ``n_recall_items``, ``args``, ``device``.
    """

    # Three negative-sampling strategies follow; the competition submission
    # ultimately used the first one: sample uniformly from all items the user
    # has never purchased.
    def negative_sampling(user_item, train_user_set):
        neg_items = []
        for user, _ in user_item.cpu().numpy():
            user = int(user)
            # rejection-sample until we draw an item outside the user's history
            while True:
                neg_item = np.random.randint(low=0, high=n_items, size=1)[0]
                if neg_item not in train_user_set[user]:
                    break
            neg_items.append(neg_item)
        return neg_items

    # Sample negatives from the recalled candidate set rather than all items.
    def recall_negative_sampling(user_item, train_user_set, recall_items):
        neg_items = []
        for user, _ in user_item.cpu().numpy():
            user = int(user)
            while True:
                neg_item = np.random.randint(low=0, high=n_recall_items, size=1)[0]
                neg_item = recall_items[neg_item]
                if neg_item not in train_user_set[user]:
                    break
            neg_items.append(neg_item)
        return neg_items

    # Sample negatives from the AORs (delivery areas) each user has purchased
    # in, rather than from all items.
    def aor_negative_sampling(user_item, train_user_set, user_aor_dict, aor_spu_dict):
        neg_items = []
        for user, _ in user_item.cpu().numpy():
            user = int(user)
            if len(user_aor_dict[user]) == 1:
                while True:
                    neg_item = np.random.randint(low=0, high=len(aor_spu_dict[user_aor_dict[user][0]]), size=1)[0]
                    neg_item = aor_spu_dict[user_aor_dict[user][0]][neg_item]
                    if neg_item not in train_user_set[user]:
                        break
            else:
                # first pick one of the user's AORs, then sample inside it
                aor_index = np.random.randint(low=0, high=len(user_aor_dict[user]), size=1)[0]
                while True:
                    neg_item = np.random.randint(low=0, high=len(aor_spu_dict[user_aor_dict[user][aor_index]]), size=1)[0]
                    neg_item = aor_spu_dict[user_aor_dict[user][aor_index]][neg_item]
                    if neg_item not in train_user_set[user]:
                        break
            neg_items.append(neg_item)
        return neg_items


    feed_dict = {}
    entity_pairs = train_entity_pairs[start:end].to(device)
    feed_dict['users'] = entity_pairs[:, 0]
    feed_dict['pos_items'] = entity_pairs[:, 1]
    if args.negative_sampling == 'all':
        feed_dict['neg_items'] = torch.LongTensor(negative_sampling(entity_pairs,
                                                                train_user_set)).to(device)
    elif args.negative_sampling == 'recall':
        feed_dict['neg_items'] = torch.LongTensor(recall_negative_sampling(entity_pairs,
                                                                    train_user_set, recall_items)).to(device)
    elif args.negative_sampling == 'aor':
        # Bug fix: this branch previously left neg_items on CPU while the other
        # two branches moved them to `device`, causing a device mismatch when
        # training on GPU.
        feed_dict['neg_items'] = torch.LongTensor(aor_negative_sampling(entity_pairs,
                                                                        train_user_set, user_aor_dict, aor_spu_dict)).to(device)
    else:
        # Fail loudly instead of returning a feed dict without 'neg_items'.
        raise ValueError('unknown negative_sampling strategy: %s' % args.negative_sampling)
    return feed_dict


if __name__ == '__main__':
    """fix the random seed"""
    # Seed every RNG (python / numpy / torch CPU+GPU) and force deterministic
    # cuDNN so runs are reproducible.
    seed = 2020
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    """read args"""
    # NOTE(review): `global` at module scope is a no-op; args/device become
    # module-level names either way and are read by get_feed_dict().
    global args, device
    args = parse_args()
    device = torch.device("cuda:"+str(args.gpu_id)) if args.cuda else torch.device("cpu")

    logging_setting(args)
    logging.info(args)

    """build dataset"""
    train_cf, test_cf, user_dict, n_params, graph, mat_list, recall_items, user_rebuy, user_aor_dict, aor_spu_dict = load_data(args)
    adj_mat_list, norm_mat_list, mean_mat_list = mat_list


    # Publish dataset sizes as module globals for get_feed_dict().
    n_users = n_params['n_users']
    n_items = n_params['n_items']
    n_entities = n_params['n_entities']
    n_relations = n_params['n_relations']
    n_nodes = n_params['n_nodes']
    n_recall_items = len(recall_items)

    """cf data"""
    # (user, item) interaction pairs as LongTensors for batch slicing.
    train_cf_pairs = torch.LongTensor(np.array([[cf[0], cf[1]] for cf in train_cf], np.int32))
    test_cf_pairs = torch.LongTensor(np.array([[cf[0], cf[1]] for cf in test_cf], np.int32))

    """choose model"""
    if args.model == 'KGIN_RI':
        model = KGIN_RI(n_params, args, graph, mean_mat_list[0]).to(device)

    """define optimizer"""
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=0)

    if args.mode == 'train':
        # Early-stopping metric: maximize hit-ratio or minimize training loss,
        # depending on args.stop_train_condition.
        if args.stop_train_condition == 'HR':
            cur_best_pre_0 = 0
        elif args.stop_train_condition == 'loss':
            cur_best_pre_0 = float('Inf')
        cur_best_ret = {}
        cur_best_epoch = 0
        stopping_step = 0
        should_stop = False

        logging.info("start training ...")
        for epoch in range(args.epoch):
            """training CF"""
            # shuffle training data
            index = np.arange(len(train_cf))
            np.random.shuffle(index)
            train_cf_pairs = train_cf_pairs[index]

            """training"""
            loss, s, cor_loss = 0, 0, 0
            train_s_t = time()
            # Drop the trailing partial batch: s advances in full batch_size steps.
            while s + args.batch_size <= len(train_cf):
                batch = get_feed_dict(train_cf_pairs,
                                      s, s + args.batch_size,
                                      user_dict['train_user_set'], recall_items, user_aor_dict, aor_spu_dict)

                # Bug fix: the original line read `batch_loss = batch_loss`,
                # which raises NameError on the first iteration and never
                # invokes the model. KGIN-style forwards return either a loss
                # tensor or a tuple whose first element is the loss — handle
                # both (TODO confirm against modules/KGIN_RI.forward).
                out = model(batch)
                batch_loss = out[0] if isinstance(out, tuple) else out
                optimizer.zero_grad()
                batch_loss.backward()
                optimizer.step()

                # detach() so the running total does not keep every batch's
                # autograd graph alive for the whole epoch.
                loss += batch_loss.detach()
                s += args.batch_size

            train_e_t = time()

            # Evaluate every 5 epochs. The loss_start_epoch guard works around
            # the first ~100 epochs in which the loss does not yet decrease
            # when using the 'loss' stopping condition.
            if ((epoch % 5 == 1 or epoch == 1) and args.stop_train_condition == "HR") \
                    or (epoch % 5 == 0  and args.stop_train_condition == "loss" and epoch >= args.loss_start_epoch):
                if args.stop_train_condition == "HR":
                    """testing"""
                    test_s_t = time()
                    ret = test(model, user_dict, n_params, recall_items, user_rebuy)
                    test_e_t = time()

                    train_res = PrettyTable()
                    train_res.field_names = ["Epoch", "training time", "tesing time", "Loss", "hit_ratio"]
                    train_res.add_row(
                        [epoch, train_e_t - train_s_t, test_e_t - test_s_t, loss.item(), ret['hit_ratio']]
                    )
                    logging.info(train_res)

                    # *********************************************************
                    # early stopping when cur_best_pre_0 is decreasing for ten successive steps.
                    cur_best_pre_0, stopping_step, should_stop = early_stopping(ret['hit_ratio'][0], cur_best_pre_0,
                                                                                stopping_step, expected_order='acc',
                                                                                flag_step=10)
                    # stopping_step == 0 means the metric just improved.
                    if stopping_step == 0:
                        cur_best_ret = ret
                        cur_best_epoch = epoch

                    if should_stop:
                        break

                    """save weight"""
                    if ret['hit_ratio'][0] == cur_best_pre_0 and args.save:
                        torch.save(model.state_dict(), args.out_dir + str(args.save_model_id) + '_' + str(args.dataset) + '_' + str(args.model) + '.ckpt')

                elif args.stop_train_condition == 'loss':
                    """whether stop train or no """
                    train_res = PrettyTable()
                    train_res.field_names = ["Epoch", "training time", "Loss"]
                    train_res.add_row(
                        [epoch, train_e_t - train_s_t, loss.item()]
                    )
                    logging.info(train_res)

                    cur_best_pre_0, stopping_step, should_stop = early_stopping(loss.item(), cur_best_pre_0,
                                                                                stopping_step, expected_order='dec',
                                                                                flag_step=2)
                    if stopping_step == 0:
                        cur_best_ret = loss.item()
                        cur_best_epoch = epoch

                    if should_stop:
                        break

                    """save weight"""
                    if stopping_step == 0 and args.save:
                        torch.save(model.state_dict(), args.out_dir + str(args.save_model_id) + '_' + str(args.dataset) + '_' + str(args.model)  + '.ckpt')

            else:
                logging.info('using time %.4f, training loss at epoch %d: %.4f' % (train_e_t - train_s_t, epoch, loss.item()))
        logging.info('early stopping at %d, HR@5:%.4f\n' % (epoch, cur_best_pre_0))

        # Log the full hyper-parameter configuration of this run.
        param_res = PrettyTable()
        param_res.field_names = ["model", "version", "dim", "lr", "l2", "sim_regularity", "inverse_r",
                                 "node_dropout", "node_dropout_rate", "mess_dropout", "mess_dropout_rate",
                                 "channel", "n_factors", "ind", "context_hops", "epoch", "batch_size", "test_batch_size", "Ks"]
        param_res.add_row(
            [args.model, args.version, args.dim, args.lr, args.l2, args.sim_regularity, args.inverse_r,
             args.node_dropout, args.node_dropout_rate, args.mess_dropout, args.mess_dropout_rate,
             args.channel, args.n_factors, args.ind, args.context_hops, args.epoch, args.batch_size, args.test_batch_size, args.Ks]
        )
        logging.info(param_res)

        logging.info('Best Iter=')
        if args.stop_train_condition == 'HR':
            # HR mode already evaluated during training; report the tracked best.
            best_res = PrettyTable()
            best_res.field_names = ["Epoch", "hit_ratio"]
            best_res.add_row(
                [cur_best_epoch,
                 cur_best_ret['hit_ratio']]
            )
            logging.info(best_res)
        elif args.stop_train_condition == 'loss':
            # Loss mode never ran test() during training: reload the best
            # checkpoint saved in the loop and evaluate it now.
            model.load_state_dict(torch.load(args.out_dir + str(args.save_model_id) + '_' + str(args.dataset) + '_' + str(args.model) + '.ckpt'))
            logging.info("start evaluating ...")
            ret = test(model, user_dict, n_params, recall_items, user_rebuy)
            best_res = PrettyTable()
            # here cur_best_ret is the best loss value, not a metrics dict
            best_res.field_names = ["Epoch", "loss", "hit_ratio"]
            best_res.add_row(
                [cur_best_epoch,
                 cur_best_ret,
                 ret['hit_ratio']]
            )
            logging.info(best_res)

    else:
        if args.cuda == False:
            model.load_state_dict(torch.load(args.out_dir + str(args.save_model_id) + '_' + str(args.dataset) + '_' + str(args.model) + '.ckpt', map_location='cpu'))
        elif args.cuda:
            model.load_state_dict(torch.load(args.out_dir + str(args.save_model_id) + '_' + str(args.dataset) + '_' + str(args.model) + '.ckpt'))
        logging.info("start evaluating ...")
        ret = test(model, user_dict, n_params, recall_items, user_rebuy)
        evaluate_res = PrettyTable()
        evaluate_res.field_names = ["hit_ratio"]
        evaluate_res.add_row(
            [ret['hit_ratio']]
        )
        logging.info(evaluate_res)

    logging.info('Best Result=')

    # Convert the per-user top-50 recommendation output into the per-order
    # format required by the competition (one line per order).
    user_bprmf_recall_spu = defaultdict(list)

    # Bug fix: the original iterated `open(...)` directly and never closed the
    # file handle; use a context manager.
    result_path = ('result/' + str(args.save_model_id) + '_top_' + str(args.rebuy_num) + '_'
                   + str(args.dataset) + '_' + str(args.model) + '_' + str(args.test_set) + '.txt')
    with open(result_path) as result_file:
        for line in result_file:
            # line format (presumably): user_id \t item_1 \t item_2 ... — TODO confirm
            lines = line.strip().split('\t')
            user_bprmf_recall_spu[int(lines[0])].extend([int(a) for a in lines[1:]])

    if 'submit' not in args.dataset:  # two-week data: ground truth available, evaluate directly
        data = pd.read_table('data/test/three_week_data.txt', sep='\t')
        test_data = data[data['dt'] > 20210620]

        # restrict to the chosen evaluation split
        if args.test_set == 'test_2':
            test_data = test_data[test_data['dt'] < 20210626]

        # one recommendation list per order (orders by the same user share a list)
        submit_result = [user_bprmf_recall_spu[user] for user in test_data['user_id']]
        spu_test_result = list(test_data['wm_food_spu_id'])

        total = len(submit_result)
        # The three near-identical Hit@K loops of the original are folded into
        # one; the rendered log lines are byte-identical.
        for k in (5, 40, 50):
            hit = sum(1 for spu, rec in zip(spu_test_result, submit_result) if spu in rec[:k])
            logging.info('按照赛题评估指标Hit@%d为: %f', k, hit / total)

    else:  # submission data: no labels available, write the final submission file instead
        orders_test_spu = pd.read_table('./data/test/orders_test_spu.txt', sep='\t')
        submit_path = './submit/' + str(args.save_model_id) + '_' + str(args.model) + '_' + str(args.dataset) + '.txt'
        with open(submit_path, 'w', encoding='UTF-8') as f:
            for user_id, order_id in zip(orders_test_spu['user_id'], orders_test_spu['wm_order_id']):
                # order id followed by the user's top-5 items, tab separated
                f.write(str(order_id))
                for item in user_bprmf_recall_spu[user_id][:5]:
                    f.write('\t')
                    f.write(str(item))
                f.write('\n')