# utils/metrics.py
import numpy as np
import torch
from collections import defaultdict


def recall_at_k(hits_list, k, test_user_dict_len):
    """Compute Recall@k for a batch of users.

    :param hits_list: per-user arrays of 0/1 hit flags over ranked positions.
    :param k: cutoff rank.
    :param test_user_dict_len: per-user counts of ground-truth positives,
        aligned with ``hits_list``; assumed non-zero by the caller.
    :return: list of per-user recall values.
    """
    recalls = []
    for hits, n_pos in zip(hits_list, test_user_dict_len):
        recalls.append(np.sum(hits[:k]) / n_pos)
    return recalls


def ndcg_at_k(hits_list, k, test_pos_lens=None):
    """Compute NDCG@k for a batch of users.

    Improvements over the previous version:
    - The position discounts and the (shared) ideal DCG are computed once,
      instead of being rebuilt inside a per-user comprehension.
    - Optional ``test_pos_lens`` enables the standard IDCG definition, which
      truncates the ideal ranking at min(n_pos, k); the old code assumed k
      relevant items for every user, underestimating NDCG for users with
      fewer than k positives.

    :param hits_list: per-user arrays of 0/1 hit flags over ranked positions
        (each at least k long).
    :param k: cutoff rank.
    :param test_pos_lens: optional per-user counts of ground-truth positives.
        When given, IDCG@k uses min(n_pos, k) ideal positions (standard
        definition). When None (default), the previous behavior is kept for
        backward compatibility: IDCG assumes k relevant items for every user.
    :return: list of per-user NDCG values.
    """
    # Discount for ranked positions 1..k: 1 / log2(position + 1).
    discounts = 1.0 / np.log2(np.arange(2, k + 2))
    if test_pos_lens is None:
        # Legacy behavior: the same full-k ideal DCG for all users.
        idcg_list = [np.sum(discounts)] * len(hits_list)
    else:
        # Standard IDCG: the ideal ranking has only min(n_pos, k) hits.
        idcg_list = [np.sum(discounts[:min(n_pos, k)]) for n_pos in test_pos_lens]
    ndcgs = []
    for hits, idcg in zip(hits_list, idcg_list):
        dcg = np.sum(hits[:k] * discounts)
        ndcgs.append(dcg / idcg if idcg > 0 else 0)
    return ndcgs


def calc_metrics_at_k(cf_scores, test_user_dict, user_ids, Ks):
    """Compute Recall@K and NDCG@K for a batch of users.

    Improvement over the previous version: the valid-user selection
    (``valid_indices`` and the filtered hits/lengths) does not depend on k,
    so it is hoisted out of the ``for k in Ks`` loop instead of being
    rebuilt at every cutoff.

    :param cf_scores: (n_users, n_items) numpy array of predicted scores,
        row i corresponding to user_ids[i].
    :param test_user_dict: {user_id: [pos_item_ids]} held-out interactions.
    :param user_ids: user ids for the rows of cf_scores, in order.
    :param Ks: list of cutoff values to evaluate.
    :return: defaultdict mapping 'Recall@k' / 'NDCG@k' to lists of per-user
        values; users with no test positives are excluded.
    """
    # 1. Rank items only as deep as the largest requested cutoff.
    max_K = max(Ks)
    _, rank_indices = torch.topk(torch.from_numpy(cf_scores), k=max_K, dim=1)
    rank_indices = rank_indices.cpu().numpy()

    # 2. Ground-truth positives per user (sets give O(1) membership tests).
    test_pos_item_sets = [set(test_user_dict.get(u, [])) for u in user_ids]
    test_pos_item_lens = [len(s) for s in test_pos_item_sets]

    # 3. Binary hit flag for each ranked position.
    hits_list = []
    for i in range(len(user_ids)):
        pos_set = test_pos_item_sets[i]
        if not pos_set:
            hits_list.append(np.zeros(max_K, dtype=np.float32))
            continue
        # np.isin preserves the ranking order of rank_indices[i].
        hits_list.append(np.isin(rank_indices[i], list(pos_set)))

    # Exclude users without positives (avoids division by zero in recall).
    # This selection is k-independent, so it is computed once, not per k.
    valid_indices = [i for i, length in enumerate(test_pos_item_lens) if length > 0]
    metrics = defaultdict(list)
    if not valid_indices:
        return metrics

    valid_hits_list = [hits_list[i] for i in valid_indices]
    valid_test_lens = [test_pos_item_lens[i] for i in valid_indices]

    # 4. Evaluate every requested cutoff.
    for k in Ks:
        metrics[f'Recall@{k}'].extend(recall_at_k(valid_hits_list, k, valid_test_lens))
        metrics[f'NDCG@{k}'].extend(ndcg_at_k(valid_hits_list, k))

    return metrics
