# -*- coding:utf-8 -*-
import numpy as np
from collections import OrderedDict

# Headline metric names. NOTE(review): run_rank_evaluate emits more keys than
# listed here ("nDCG@3", "nDCG@5", "MAP", "MRR") — presumably a consumer
# elsewhere filters on this subset; verify against callers.
EVAL_NAMES = ["nDCG@1", "nERR@10", "pPlus"]


def _cul_ndcg_k(labels, preds, k=1):
    rec = []
    for pred, label in zip(preds, labels):
        rec.append((pred, label))
    rel = sorted(rec, key=lambda s: s[0], reverse=True)[:k]
    irel = sorted(rec, key=lambda s: s[1], reverse=True)[:k]

    rel_score = sum([2 ** l - 1 for (_, l) in rel])
    irel_score = sum([2 ** l - 1 for (_, l) in irel])

    if irel_score == 0.0:
        return 1.0
    ndcg_k = rel_score / irel_score
    return ndcg_k


def eval_ndcg_k(y_ture, y_pred, groups, k=1):
    """Mean nDCG@k over query groups.

    Args:
        y_ture: flat sequence of true relevance labels (name kept as-is
            for caller compatibility).
        y_pred: flat sequence of predicted scores, aligned with y_ture.
        groups: per-query document counts; must sum to len(y_pred).
        k: rank cutoff passed through to _cul_ndcg_k.

    Returns:
        Average nDCG@k across all groups.
    """
    total = 0.0
    start = 0
    for size in groups:
        end = start + size
        total += _cul_ndcg_k(y_ture[start:end], y_pred[start:end], k=k)
        start = end
    assert start == len(y_pred)
    return total / len(groups)


def _cul_pr(rels):
    ":type numpy.ndarray"
    return (2 ** rels - 1) / 4.0


def _cul_nerr(idea_rels, pred_rels, k):
    """Normalized Expected Reciprocal Rank at cutoff k for one query.

    ERR is accumulated as sum over ranks r of P(stop at r) / r, where the
    stop-probability comes from _cul_pr and the probability of reaching
    rank r is the product of (1 - stop) over all earlier ranks. The
    predicted-order ERR is normalized by the ideal-order ERR.

    Returns 1.0 when the ideal ERR is 0 (no relevant documents).
    """
    pairs = list(zip(idea_rels, pred_rels))

    # True labels taken in ideal order and in predicted order (top k only).
    ideal_top = [irel for irel, _ in sorted(pairs, key=lambda p: p[0], reverse=True)[:k]]
    pred_top = [irel for irel, _ in sorted(pairs, key=lambda p: p[1], reverse=True)[:k]]

    stop_pred = _cul_pr(np.asarray(pred_top))
    stop_ideal = _cul_pr(np.asarray(ideal_top))

    # Running probability of NOT having stopped before the current rank.
    cont_pred = 1.0
    cont_ideal = 1.0
    err_pred = 0.0
    err_ideal = 0.0

    for rank, (sp, si) in enumerate(zip(stop_pred, stop_ideal), start=1):
        err_pred += cont_pred * sp / rank
        err_ideal += cont_ideal * si / rank
        cont_pred *= 1.0 - sp
        cont_ideal *= 1.0 - si

    if err_ideal == 0:
        return 1.0

    return err_pred / err_ideal


def eval_nerr(idea_rels, pred_rels, groups, k=10):
    """Mean nERR@k across query groups.

    Args:
        idea_rels: flat sequence of true relevance labels.
        pred_rels: flat sequence of predicted scores, aligned with idea_rels.
        groups: per-query document counts.
        k: rank cutoff for each per-query nERR.

    Returns:
        Average nERR@k over all groups.
    """
    idea_arr = np.asarray(idea_rels)
    pred_arr = np.asarray(pred_rels)

    per_query = []
    offset = 0
    for size in groups:
        nxt = offset + size
        per_query.append(_cul_nerr(idea_arr[offset:nxt], pred_arr[offset:nxt], k))
        offset = nxt

    return np.mean(per_query)


def _cul_pplus(idea_rels, pred_rels):
    rec = []
    for irel, prel in zip(idea_rels, pred_rels):
        rec.append((irel, prel))

    # rank
    ranked_irels = sorted(rec, key=lambda r: r[0], reverse=True)
    ranked_prels = sorted(rec, key=lambda r: r[1], reverse=True)

    ranked_irels = [r[0] for r in ranked_irels]  # the idea rank
    ranked_prels = [r[0] for r in ranked_prels]  # the predict rank

    rp = np.argmax(ranked_prels) + 1

    si = [0]
    sg = [0]
    sgi = [0]

    sum_pps = 0.0
    for r in range(rp):
        i = 0
        if ranked_prels[r] > 0:
            i = 1
        g = 2 ** ranked_prels[r] - 1
        gi = 2 ** ranked_irels[r] - 1

        si.append(si[-1] + i)
        sg.append(sg[-1] + g)
        sgi.append(sgi[-1] + gi)

        br_r = (si[-1] + sg[-1]) * 1.0 / (r + 1 + sgi[-1])
        sum_pps += br_r * i

    # print(si)
    if si[-1] == 0:
        return 1.0
    return sum_pps / si[-1]
    pass


def eval_pplus(idea_rels, pred_rels, groups):
    """Mean P-plus across query groups.

    Args:
        idea_rels: flat sequence of true relevance labels.
        pred_rels: flat sequence of predicted scores, aligned with idea_rels.
        groups: per-query document counts.

    Returns:
        Average P+ over all groups.
    """
    idea_arr = np.asarray(idea_rels)
    pred_arr = np.asarray(pred_rels)

    per_query = []
    offset = 0
    for size in groups:
        nxt = offset + size
        per_query.append(_cul_pplus(idea_arr[offset:nxt], pred_arr[offset:nxt]))
        offset = nxt

    return np.mean(per_query)


def _cul_rr(idea_rels, pred_rels):
    pairs = list(zip(pred_rels, idea_rels))
    sorted_pairs = sorted(pairs, key=lambda p: p[0], reverse=True)
    for i, (pred_rel, idea_pred) in enumerate(sorted_pairs):
        if idea_pred > 0:
            return 1.0 / (i + 1)
    return 1.0
    pass


def eval_mrr(idea_rels, pred_rels, groups):
    """Mean Reciprocal Rank across query groups.

    Args:
        idea_rels: flat sequence of true relevance labels.
        pred_rels: flat sequence of predicted scores, aligned with idea_rels.
        groups: per-query document counts.

    Returns:
        Average reciprocal rank over all groups.
    """
    per_query = []
    offset = 0
    for size in groups:
        nxt = offset + size
        per_query.append(_cul_rr(idea_rels[offset:nxt], pred_rels[offset:nxt]))
        offset = nxt

    return np.mean(per_query)


def _cul_ap(idea_rels, pred_rels):
    rank = 0
    pairs = list(zip(pred_rels, idea_rels))
    sorted_pairs = sorted(pairs, key=lambda p: p[0], reverse=True)
    ap_score = 0.0
    for i, (pred_rel, idea_rel) in enumerate(sorted_pairs):
        if idea_rel > 0:
            rank += 1
            ap_score += (rank * 1.0 / (i + 1))

    return ap_score / rank if rank > 0 else 1.0
    pass


def eval_map(idea_rels, pred_rels, groups):
    """Mean Average Precision across query groups.

    Args:
        idea_rels: flat sequence of true relevance labels.
        pred_rels: flat sequence of predicted scores, aligned with idea_rels.
        groups: per-query document counts.

    Returns:
        Average AP over all groups.
    """
    per_query = []
    offset = 0
    for size in groups:
        nxt = offset + size
        per_query.append(_cul_ap(idea_rels[offset:nxt], pred_rels[offset:nxt]))
        offset = nxt

    return np.mean(per_query)


def run_rank_evaluate(labels, predictions, groups):
    """Run the full suite of ranking metrics over grouped predictions.

    Args:
        labels: flat sequence of true relevance labels for all queries.
        predictions: flat sequence of model scores, aligned with labels.
        groups: per-query document counts; slices labels/predictions
            into one segment per query.

    Returns:
        OrderedDict mapping metric name to score, in a fixed display
        order: nDCG@1/3/5, MAP, MRR, nERR@10, pPlus.
    """
    # Fixed: removed unreachable `pass` after the return statement.
    res_dict = OrderedDict()
    res_dict["nDCG@1"] = eval_ndcg_k(labels, predictions, groups, k=1)
    res_dict["nDCG@3"] = eval_ndcg_k(labels, predictions, groups, k=3)
    res_dict["nDCG@5"] = eval_ndcg_k(labels, predictions, groups, k=5)
    res_dict["MAP"] = eval_map(labels, predictions, groups)
    res_dict["MRR"] = eval_mrr(labels, predictions, groups)
    res_dict["nERR@10"] = eval_nerr(labels, predictions, groups, k=10)
    res_dict["pPlus"] = eval_pplus(labels, predictions, groups)

    return res_dict

