/*
Copyright 2024-2025 The Spice.ai OSS Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

use std::collections::HashMap;

/// Calculates the average Normalized Discounted Cumulative Gain (NDCG@k) across all search queries.
///
/// NDCG@k measures the quality of ranking by considering both relevance and position,
/// with higher-ranked relevant documents contributing more to the score. This implementation
/// follows the MTEB (Massive Text Embedding Benchmark) methodology: for each query, the
/// returned documents are ranked by similarity score (descending) and their relevance
/// judgments are accumulated with a logarithmic position discount.
///
/// Queries present in `qrels` but absent from `results` are reported and excluded from
/// the average (they do not contribute a 0 score).
///
/// # Arguments
/// * `qrels` - Query relevance judgments mapping `query_id` -> (`doc_id` -> `relevance_score`)
/// * `results` - Search results mapping `query_id` -> (`doc_id` -> `similarity_score`)
/// * `k` - Number of top results to consider for NDCG calculation
///
/// # Returns
/// Average NDCG@k score across all queries (0.0 to 1.0, where 1.0 is perfect ranking).
/// Returns 0.0 when no query has any search results.
///
/// # Reference
/// MTEB `RetrievalEvaluator`: <https://github.com/embeddings-benchmark/mteb/blob/03347ebfe4809056e0fd2894fcae69dcdd2ed964/mteb/evaluation/evaluators/RetrievalEvaluator.py#L500>
#[expect(clippy::cast_precision_loss)]
#[must_use]
pub(crate) fn calculate_ndcg<S: ::std::hash::BuildHasher>(
    qrels: &HashMap<String, HashMap<String, i32, S>, S>,
    results: &HashMap<String, HashMap<String, f64, S>, S>,
    k: usize,
) -> f64 {
    let mut ndcg_at_k_values = Vec::new();

    for (query_id, relevance) in qrels {
        if let Some(ranked_results) = results.get(query_id) {
            // HashMap iteration order is arbitrary, but NDCG's position discount
            // depends on rank: order documents by similarity score, descending,
            // before reading off their relevance judgments.
            let mut ranked: Vec<(&String, f64)> = ranked_results
                .iter()
                .map(|(doc_id, score)| (doc_id, *score))
                .collect();
            ranked.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
            let relevance_scores: Vec<f64> = ranked
                .iter()
                // Documents without a judgment are treated as non-relevant (0).
                .map(|(doc_id, _)| f64::from(*relevance.get(*doc_id).unwrap_or(&0)))
                .collect();
            ndcg_at_k_values.push(ndcg_at_k(&relevance_scores, k));
        } else {
            println!("No search results found for test query {query_id}");
        }
    }
    // Avoid 0.0 / 0 == NaN when no query produced a score.
    if ndcg_at_k_values.is_empty() {
        return 0.0;
    }
    let len = ndcg_at_k_values.len();
    ndcg_at_k_values.into_iter().sum::<f64>() / len as f64
}

/// Discounted Cumulative Gain over the first `k` entries of `relevance_scores`.
///
/// The score at rank `i` (0-based) is divided by `log2(i + 2)`, so the top
/// position carries full weight (`log2(2) == 1`) and later positions decay.
#[expect(clippy::cast_precision_loss)]
fn dcg_at_k(relevance_scores: &[f64], k: usize) -> f64 {
    let mut total = 0.0;
    for (rank, &gain) in relevance_scores.iter().take(k).enumerate() {
        total += gain / ((rank as f64) + 2.0).log2();
    }
    total
}

/// Ideal DCG@k: the DCG achieved when the gains are arranged in the best
/// possible order (highest relevance first). Used as the normalizer for NDCG.
fn idcg_at_k(relevance_scores: &[f64], k: usize) -> f64 {
    let mut ideal = relevance_scores.to_vec();
    // Descending sort; NaN comparisons fall back to Equal so this never panics.
    ideal.sort_by(|lhs, rhs| rhs.partial_cmp(lhs).unwrap_or(std::cmp::Ordering::Equal));
    dcg_at_k(&ideal, k)
}

/// Normalized DCG@k: the actual DCG divided by the ideal DCG.
///
/// Returns 0.0 when the ideal DCG is zero (no relevant documents at all),
/// avoiding a 0/0 division.
fn ndcg_at_k(relevance_scores: &[f64], k: usize) -> f64 {
    let ideal = idcg_at_k(relevance_scores, k);
    if ideal == 0.0 {
        0.0
    } else {
        dcg_at_k(relevance_scores, k) / ideal
    }
}
