# evaluation.py

import logging
import numpy as np
from tqdm import tqdm
from llama_index.finetuning import EmbeddingQAFinetuneDataset
from llama_index.core.base.base_retriever import BaseRetriever

logger = logging.getLogger(__name__)

def evaluate_ndcg_at_k(
    dataset: EmbeddingQAFinetuneDataset, retriever: BaseRetriever, k: int = 10
) -> float:
    """
    Calculates the mean NDCG@k for a given dataset and retriever.

    Relevance is binary: a retrieved node counts as a hit when its
    ``node_id`` appears in the query's relevant-docs list. Queries with
    no relevant docs are skipped — and skipped *before* retrieval, so no
    retriever call is wasted on them.

    Args:
        dataset: The dataset containing queries and relevant documents.
        retriever: The retriever to evaluate.
        k: The cutoff for NDCG calculation.

    Returns:
        The mean NDCG@k score as a plain float (0.0 if no query had
        any relevant docs).
    """
    logger.info("Starting evaluation for NDCG@%d...", k)
    queries = dataset.queries
    relevant_docs = dataset.relevant_docs
    ndcg_scores = []

    for query_id, query in tqdm(queries.items(), desc="Evaluating Queries"):
        expected_ids = relevant_docs.get(query_id, [])
        # Guard first: retrieval is the expensive step, and a query with
        # no relevant docs can never contribute to the score.
        if not expected_ids:
            continue

        retrieved_nodes = retriever.retrieve(query)
        retrieved_ids = [node.node.node_id for node in retrieved_nodes]
        expected_set = set(expected_ids)  # O(1) membership in the DCG loop

        # DCG with binary gains: 1 / log2(rank + 1), rank starting at 1.
        dcg = sum(
            1.0 / np.log2(i + 2)
            for i, r_id in enumerate(retrieved_ids[:k])
            if r_id in expected_set
        )

        # IDCG: ideal ranking puts every relevant doc first, capped at k.
        idcg = sum(1.0 / np.log2(i + 2) for i in range(min(k, len(expected_ids))))

        ndcg_scores.append(dcg / idcg if idcg > 0 else 0.0)

    # float() keeps the annotated return type (np.mean yields np.float64).
    mean_ndcg = float(np.mean(ndcg_scores)) if ndcg_scores else 0.0
    logger.info("Evaluation finished. Mean NDCG@%d: %.4f", k, mean_ndcg)
    return mean_ndcg