from typing import Dict, List
import torch
from tqdm import tqdm
from utils import recall_at_k, ndcg_at_k, precision_at_k, hit_ratio_at_k

def _prepare_scores(scores: torch.Tensor, train_items: List[int]):
    if train_items:
        scores[train_items] = float('-inf')
    return scores

def test(model, data, top_k: List[int], batch_size: int) -> Dict[str, List[float]]:
    """Evaluate a recommender model on the test split with top-k ranking metrics.

    Args:
        model: Trained model exposing ``eval()``, ``device``, and
            ``predict(users) -> (batch, num_items)`` score tensor.
        data: Dataset object exposing ``get_test_loader(batch_size)``, whose
            batches yield ``(users, test_item_lists, train_item_lists)``.
        top_k: Cutoff values k at which each metric is computed.
        batch_size: Number of users scored per batch.

    Returns:
        Dict with keys ``"recall"``, ``"ndcg"``, ``"precision"``, ``"hit"``,
        each mapping to a list of metric values aligned with ``top_k``,
        averaged over users that have at least one test item. If no user
        qualifies, the zero-initialized accumulators are returned as-is.
    """
    model.eval()
    max_k = max(top_k)
    metrics = {
        "recall": [0.0 for _ in top_k],
        "ndcg": [0.0 for _ in top_k],
        "precision": [0.0 for _ in top_k],
        "hit": [0.0 for _ in top_k],
    }
    valid_users = 0

    test_loader = data.get_test_loader(batch_size)

    # Inference only: disable autograd so predict() does not build a graph
    # for every batch (saves memory and compute; also makes detach() moot).
    with torch.no_grad():
        for users, test_item_lists, train_item_lists in tqdm(test_loader, desc="[Eval]", leave=False):
            if users.numel() == 0:
                continue

            users = users.to(model.device)
            scores = model.predict(users).cpu()

            for idx, user_scores in enumerate(scores):
                ground_truth = test_item_lists[idx]
                # Users with no held-out items contribute nothing to the average.
                if not ground_truth:
                    continue

                # Clone before masking so the batch tensor stays untouched.
                filtered_scores = _prepare_scores(user_scores.clone(), train_item_lists[idx])
                top_items = torch.topk(filtered_scores, max_k).indices.tolist()

                valid_users += 1
                for i, k in enumerate(top_k):
                    metrics["recall"][i] += recall_at_k(ground_truth, top_items, k)
                    metrics["ndcg"][i] += ndcg_at_k(ground_truth, top_items, k)
                    metrics["precision"][i] += precision_at_k(ground_truth, top_items, k)
                    metrics["hit"][i] += hit_ratio_at_k(ground_truth, top_items, k)

    # No evaluable user: return the zeroed accumulators (avoid 0/0).
    if valid_users == 0:
        return metrics

    for key in metrics:
        metrics[key] = [value / valid_users for value in metrics[key]]
    return metrics