import numpy as np
import torch as t
from torch.utils.tensorboard import SummaryWriter
from model import PEKN
import os
import shutil


def train(args, data, show_loss, show_topk):
    """Train the PEKN model and log per-epoch train/eval/test AUC and ACC.

    Args:
        args: namespace with lr, l2_weight, n_epochs, batch_size, n_iter, use_cuda.
        data: tuple from the data loader; unpacked positionally below
            (counts, the three splits, KG adjacency, and the ripple set).
        show_loss: print per-batch progress and loss when True.
        show_topk: additionally run top-K precision/recall evaluation when True.
    """
    n_user, n_item, n_entity, n_relation = data[0], data[1], data[2], data[3]
    train_data, eval_data, test_data = data[4], data[5], data[6]
    adj_entity, adj_relation = data[7], data[8]
    ripple_set = data[9]
    interaction_table, offset = get_interaction_table(train_data, n_entity)
    model = PEKN(args, n_user, n_entity, n_relation, adj_entity, adj_relation, interaction_table, offset)

    # top-K evaluation settings (all None when show_topk is False)
    user_list, train_record, test_record, item_set, k_list = topk_settings(show_topk, train_data, test_data, n_item)
    if args.use_cuda:
        model.cuda()
    optimizer = t.optim.Adam(
        filter(lambda p: p.requires_grad, model.parameters()),
        args.lr,
        weight_decay=args.l2_weight
    )
    scheduler = t.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=args.n_epochs)
    writer = SummaryWriter(log_dir='log')
    for step in range(args.n_epochs):
        # training
        np.random.shuffle(train_data)
        start = 0
        sum_loss = 0.0
        batch_count = 0  # renamed from `sum`, which shadowed the builtin
        while start < train_data.shape[0]:
            return_dict = model(*get_feed_dict(args, model, train_data, ripple_set, start, start + args.batch_size))
            loss = return_dict['loss']
            # Accumulate a detached Python float: accumulating the loss tensor
            # itself would keep every batch's autograd graph alive and leak memory.
            sum_loss += loss.item()
            batch_count += 1
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            start += args.batch_size
            if show_loss:
                print('%.1f%% %.4f' % (start / train_data.shape[0] * 100, loss.item()))

        scheduler.step()
        # evaluation on all three splits each epoch
        train_auc, train_acc = evaluation(args, model, train_data, ripple_set, args.batch_size)
        writer.add_scalar('PEKN_MUSIC_TRAIN_AUC', train_auc, step+1)
        writer.add_scalar('PEKN_MUSIC_TRAIN_ACC', train_acc, step+1)
        writer.add_scalar('PEKN_MUSIC_LOSS', sum_loss/batch_count, step+1)
        eval_auc, eval_acc = evaluation(args, model, eval_data, ripple_set, args.batch_size)
        writer.add_scalar('PEKN_MUSIC_EVAL_AUC', eval_auc, step + 1)
        writer.add_scalar('PEKN_MUSIC_EVAL_ACC', eval_acc, step + 1)
        test_auc, test_acc = evaluation(args, model, test_data, ripple_set, args.batch_size)
        writer.add_scalar('PEKN_MUSIC_TEST_AUC', test_auc, step + 1)
        writer.add_scalar('PEKN_MUSIC_TEST_ACC', test_acc, step + 1)
        print('epoch %d     loss: %.2f    train auc: %.4f  acc: %.4f    eval auc: %.4f  acc: %.4f    test auc: %.4f  acc: %.4f'
                % (step+1, sum_loss/batch_count, train_auc, train_acc, eval_auc, eval_acc, test_auc, test_acc))
        # top-K evaluation
        if show_topk:
            # NOTE(review): return_dict here is from the LAST training batch of
            # this epoch; see topk_eval2 for why that looks suspicious.
            precision, recall = topk_eval2(
                args, eval_data, model, user_list, train_record, test_record, item_set, k_list, ripple_set,
                args.batch_size, return_dict
            )
            print('precision: ', end='')
            for i in precision:
                print('%.4f\t' % i, end='')
            print()
            print('recall: ', end='')
            for i in recall:
                print('%.4f\t' % i, end='')
            print('\n')

    writer.close()


def get_feed_dict(args, model, data, ripple_set, start, end):
    """Assemble one minibatch of model inputs from rows data[start:end].

    Returns (users, items, labels, memories_h, memories_r, memories_t), where
    each memories_* is a list with one LongTensor per ripple hop.
    """
    batch = data[start:end]
    users = t.LongTensor(batch[:, 0])
    items = t.LongTensor(batch[:, 1])
    labels = t.LongTensor(batch[:, 2])
    memories_h, memories_r, memories_t = [], [], []
    for hop in range(args.n_iter):
        memories_h.append(t.LongTensor([ripple_set[u][hop][0] for u in batch[:, 0]]))
        memories_r.append(t.LongTensor([ripple_set[u][hop][1] for u in batch[:, 0]]))
        memories_t.append(t.LongTensor([ripple_set[u][hop][2] for u in batch[:, 0]]))
    if args.use_cuda:
        users, items, labels = users.cuda(), items.cuda(), labels.cuda()
        memories_h = [m.cuda() for m in memories_h]
        memories_r = [m.cuda() for m in memories_r]
        memories_t = [m.cuda() for m in memories_t]
    return users, items, labels, memories_h, memories_r, memories_t


def evaluation(args, model, data, ripple_set, batch_size):
    """Return mean (AUC, ACC) over `data`, evaluated in minibatches.

    Switches the model to eval mode for the duration and restores train
    mode before returning.
    """
    model.eval()
    auc_scores, acc_scores = [], []
    for begin in range(0, data.shape[0], batch_size):
        auc, acc = model.evaluate(*get_feed_dict(args, model, data, ripple_set, begin, begin + batch_size))
        auc_scores.append(auc)
        acc_scores.append(acc)
    model.train()
    return float(np.mean(auc_scores)), float(np.mean(acc_scores))




def get_interaction_table(train_data, n_entity):
    """Build a (user, item) -> label lookup used for label-smoothness regularization.

    Keys are encoded as ``user * offset + item`` with
    ``offset = 10 ** len(str(n_entity))`` so the item id occupies the low
    decimal digits and keys cannot collide.

    Args:
        train_data: ndarray of shape (n, 3) with columns (user, item, label).
        n_entity: entity count; only its decimal width matters for the offset.

    Returns:
        (lookup, offset): ``lookup(key)`` returns the stored label as a Python
        float, or a ``tensor(0.5)`` default for unseen keys.
    """
    offset = 10 ** len(str(n_entity))
    keys = (train_data[:, 0] * offset + train_data[:, 1]).astype(np.int64)
    values = train_data[:, 2].astype(np.float32)

    # Build the dict straight from numpy scalars. The original wrapped keys and
    # values in tensors only to call .item() per element — a wasteful round-trip.
    table = {int(k): float(v) for k, v in zip(keys, values)}

    default_value = t.tensor(0.5)

    class LookupFunction(t.nn.Module):
        """Module wrapper so the lookup can be passed to the model like a layer."""

        def __init__(self, table, default):
            super(LookupFunction, self).__init__()
            self.table = table
            self.default = default

        def forward(self, key):
            # Known keys return a float; misses return the tensor default
            # (matches the original behavior exactly).
            return self.table.get(key, self.default)

    return LookupFunction(table, default_value), offset



def topk_settings(show_topk, train_data, test_data, n_item):
    """Prepare users, interaction records, candidate items and K values for top-K eval.

    Returns a 5-list of Nones when show_topk is False; otherwise
    (user_list, train_record, test_record, item_set, k_list), with at most
    100 users sampled from those present in both train and test records.
    """
    if not show_topk:
        return [None] * 5
    user_num = 100
    k_list = [1, 2, 5, 10, 20, 50, 100]
    train_record = get_user_record(train_data, True)
    test_record = get_user_record(test_data, False)
    shared_users = list(set(train_record.keys()) & set(test_record.keys()))
    if len(shared_users) > user_num:
        shared_users = np.random.choice(shared_users, size=user_num, replace=False)
    item_set = set(range(n_item))
    return shared_users, train_record, test_record, item_set, k_list




def get_user_record(data, is_train):
    """Map each user to the set of items they interacted with.

    For training data every interaction is kept; otherwise only positive
    interactions (label == 1) count.
    """
    history = dict()
    for user, item, label in data:
        if not (is_train or label == 1):
            continue
        history.setdefault(user, set()).add(item)
    return history


def topk_eval2(args, data,  model, user_list, train_record, test_record, item_set, k_list, ripple_set, batch_size, return_dict):
    """Compute top-K precision and recall over `user_list`.

    For each user, ranks candidate items (item_set minus the user's training
    items) by score and measures hits against test_record[user].

    NOTE(review): this function looks broken in several places — see the
    inline notes. The commented-out model(...) call suggests scores were
    meant to be recomputed per batch, which is not what happens now.

    Returns:
        (precision, recall): lists of means, one entry per k in k_list.
    """
    precision_list = {k: [] for k in k_list}
    recall_list = {k: [] for k in k_list}

    for user in user_list:
        test_item_list = list(item_set - train_record[user])
        item_score_map = dict()
        start = 0
        while start + args.batch_size <= len(test_item_list):
            # items, scores = model.get_feed_dict(args, model, data, ripple_set, start, start + batch_size)
            # return_dict = model(*get_feed_dict(args, model, data, ripple_set, start, start + args.batch_size))
            # NOTE(review): `items` is sliced from `data` rows, not from
            # test_item_list, and `scores` comes from the caller's stale
            # `return_dict` (the last TRAINING batch) — the same scores are
            # reused for every user and every window. Almost certainly the
            # commented-out per-batch forward pass above was intended.
            items = t.LongTensor(data[start:start+args.batch_size, 1])
            scores = return_dict['scores']
            for item, score in zip(items, scores):
                item_score_map[item] = score
            start += args.batch_size

        # padding the last incomplete minibatch if exists
        if start < len(test_item_list):
            # NOTE(review): this is a TensorFlow-style feed dict
            # ({model.user_indices: ...}) passed to model.get_scores — likely
            # leftover TF code; verify it matches the PyTorch model's API.
            items, scores = model.get_scores(
                {model.user_indices: [user] * args.batch_size,
                       model.item_indices: test_item_list[start:] + [test_item_list[-1]] * (
                               args.batch_size - len(test_item_list) + start)})
            for item, score in zip(items, scores):
                item_score_map[item] = score

        # rank candidates by score, best first
        item_score_pair_sorted = sorted(item_score_map.items(), key=lambda x: x[1], reverse=True)
        item_sorted = [i[0] for i in item_score_pair_sorted]

        for k in k_list:
            hit_num = len(set(item_sorted[:k]) & test_record[user])
            precision_list[k].append(hit_num / k)
            recall_list[k].append(hit_num / len(test_record[user]))

    precision = [np.mean(precision_list[k]) for k in k_list]
    recall = [np.mean(recall_list[k]) for k in k_list]

    return precision, recall