import numpy as np
from dataloader import BasicDataset
from sklearn.metrics import  roc_auc_score

def auc(groundTruth):
    '''
    Per-user AUC for a batch of ranked binary relevance lists.

    Each row of ``groundTruth`` must already be sorted by descending
    predicted score; entries are 1/0 (relevant / not relevant).  AUC is
    computed with the rank-sum formula: the fraction of
    (positive, negative) pairs ordered correctly.

    :param groundTruth: array-like, shape (user_size, all_item/topk)
    :return: list of per-user AUC values; 0 for a row with no positives,
             1 for an all-positive row (AUC is undefined in those cases)
    '''
    scores = []
    for row in groundTruth:
        labels = np.asarray(row)[::-1]  # ascending-score order
        n_total = len(labels)
        n_pos = np.sum(labels)
        if n_pos == 0:
            scores.append(0)
            continue
        if n_pos == n_total:
            scores.append(1)
            continue
        # Sum of 0-based positions of positives in ascending-score order;
        # subtracting C(n_pos, 2) leaves the number of correctly ordered
        # (positive, negative) pairs.
        pos_rank_sum = np.flatnonzero(labels > 0).sum()
        correct_pairs = pos_rank_sum - n_pos * (n_pos - 1) / 2
        total_pairs = n_pos * (n_total - n_pos)
        scores.append(correct_pairs / total_pairs)
    return scores


def MRRatK(ground_truth, k=0):
    """
    Summed Mean Reciprocal Rank over a batch of users.

    Each row of ``ground_truth`` holds binary relevance labels sorted by
    descending predicted score.  For every user the reciprocal rank of
    the best-placed hit inside the top-k is taken (0 when there is no
    hit) and the per-user values are summed.

    :param ground_truth: np.array, shape (user_size, n_items), 0/1 labels
    :param k: cutoff; 0 or any value >= the row length means "full row"
    :return: sum over users of the per-user reciprocal rank
    """
    row_len = len(ground_truth[0])
    if k == 0 or k >= row_len:
        k = row_len
    reciprocal = ground_truth[:, :k] / np.arange(1, k + 1)
    return np.sum(reciprocal.max(axis=1))

def AUC(all_item_scores, dataset, test_data):
    """
    ROC-AUC over all items for a single user.

    :param all_item_scores: score for every item in the catalogue; items
        with a negative score are excluded from evaluation (presumably
        masked-out training interactions -- confirm against the caller)
    :param dataset: BasicDataset providing ``m_items`` (total item count)
    :param test_data: indices of this user's held-out positive items
    :return: sklearn ``roc_auc_score`` over the non-excluded items
    """
    # NOTE(review): the original `dataset : BasicDataset` line was a bare
    # annotation statement with no effect; the type is documented above.
    # Binary relevance vector over the full item catalogue.
    r_all = np.zeros((dataset.m_items, ))
    r_all[test_data] = 1
    # Compute the validity mask once instead of twice.
    mask = all_item_scores >= 0
    return roc_auc_score(r_all[mask], all_item_scores[mask])


def RecallPrecision_ATk(test_data, r, k):
    """
    Summed recall@k and precision@k for a batch of users.

    :param test_data: per-user lists of ground-truth positive items
        (a list, since users may have different numbers of positives)
    :param r: np.array, shape (test_batch, >=k); r[u, j] == 1 iff the
        j-th ranked item of user u is a true positive (pre-sorted by score)
    :param k: cutoff
    :return: dict with 'recall' and 'precision', each summed over users
    """
    hits_per_user = r[:, :k].sum(1)
    truth_sizes = np.array([len(items) for items in test_data])
    total_recall = np.sum(hits_per_user / truth_sizes)
    total_precision = np.sum(hits_per_user) / k
    return {'recall': total_recall, 'precision': total_precision}


def MRRatK_r(r, k):
    """
    Summed Mean Reciprocal Rank at k.

    :param r: np.array, shape (test_batch, >=k) of 0/1 hit indicators,
        pre-sorted by descending predicted score
    :param k: cutoff
    :return: sum over users of 1 / rank of the first hit (0 if no hit)
    """
    weighted = r[:, :k] / np.arange(1, k + 1)
    return np.sum(weighted.max(axis=1))

def NDCGatK_r(test_data, r, k):
    """
    Summed NDCG@k over a batch of users.

    Relevance is binary (rel_i in {0, 1}), so the gain 2^{rel_i} - 1
    reduces to rel_i itself.

    :param test_data: per-user lists of ground-truth positive items
    :param r: np.array, shape (test_batch, >=k) of 0/1 hit indicators,
        pre-sorted by descending predicted score
    :param k: cutoff
    :return: sum over users of NDCG@k
    """
    assert len(r) == len(test_data)
    hits = r[:, :k]
    discount = 1. / np.log2(np.arange(2, k + 2))

    # Ideal ranking: all of the user's positives at the top, capped at k.
    ideal = np.zeros((len(hits), k))
    for row, items in enumerate(test_data):
        ideal[row, :min(k, len(items))] = 1

    idcg = np.sum(ideal * discount, axis=1)
    dcg = np.sum(hits * discount, axis=1)
    idcg[idcg == 0.] = 1.        # users with no positives: avoid 0/0
    ndcg = dcg / idcg
    ndcg[np.isnan(ndcg)] = 0.    # defensive; idcg fix above should prevent NaNs
    return np.sum(ndcg)


if __name__ == "__main__":
    # Smoke test for `auc`: 10 low-scored negatives plus 10 high-scored
    # positives, so the ranked label list should give a high AUC.
    np.random.seed(0)  # make the demo reproducible
    neg_scores = np.random.randint(0, 60, 10) / 100
    pos_scores = np.random.randint(50, 100, 10) / 100
    prediction = np.concatenate([neg_scores, pos_scores])
    gt = [0] * 10 + [1] * 10
    # Re-order the labels by descending predicted score, as `auc` expects.
    pred_rank = prediction.argsort()[::-1]
    binary_gt_rank = np.array([gt[idx] for idx in pred_rank])
    print(prediction)
    print(auc([binary_gt_rank[:15]]))
