#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Evaluation metrics for collaborative filtering recommendation systems.

Created on: 2025-04-18
Author: Nianqing Liu
"""

import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error


def calculate_rmse(y_true, y_pred):
    """
    Calculate Root Mean Squared Error.

    Parameters:
    -----------
    y_true : array-like
        True ratings.
    y_pred : array-like
        Predicted ratings.

    Returns:
    --------
    float
        RMSE value.
    """
    return np.sqrt(mean_squared_error(y_true, y_pred))


def calculate_mae(y_true, y_pred):
    """
    Calculate Mean Absolute Error.

    Computed directly with numpy (already imported at module level) rather
    than delegating to sklearn — a one-line mean of absolute differences.

    Parameters:
    -----------
    y_true : array-like
        True ratings.
    y_pred : array-like
        Predicted ratings.

    Returns:
    --------
    float
        MAE value.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.mean(np.abs(y_true - y_pred)))


def evaluate_rating_predictions(test_data, predicted_ratings):
    """
    Compute rating-prediction accuracy (RMSE and MAE) on a test set.

    Parameters:
    -----------
    test_data : pandas.DataFrame
        Test set with "user_id", "item_id", and "rating" columns
        (ids are assumed 1-based and are shifted to 0-based indices).
    predicted_ratings : numpy.ndarray
        Full predicted rating matrix, indexed as [user, item] (0-based).

    Returns:
    --------
    dict
        Dictionary with "rmse" and "mae" entries.
    """
    # Map the dataframe's 1-based ids onto 0-based matrix indices.
    rows = test_data["user_id"].values - 1
    cols = test_data["item_id"].values - 1
    actual = test_data["rating"].values

    # Fancy-index the matrix at every (user, item) pair in the test set.
    predicted = predicted_ratings[rows, cols]

    return {
        "rmse": calculate_rmse(actual, predicted),
        "mae": calculate_mae(actual, predicted),
    }


def calculate_precision_recall_at_k(
    test_data, predicted_ratings, ratings_matrix=None, k=10, threshold=3.5
):
    """
    Calculate average precision@k and recall@k over users in the test set.

    For each user, the top-k candidate items by predicted rating form the
    recommendation list; the user's test items rated at or above
    `threshold` form the relevant set. Users with no relevant test items
    are skipped (the metrics are undefined for them).

    Parameters:
    -----------
    test_data : pandas.DataFrame
        Test set with "user_id", "item_id", and "rating" columns
        (1-based ids, shifted internally to 0-based indices).
    predicted_ratings : numpy.ndarray
        Predicted rating matrix; rows are users, columns are items (0-based).
    ratings_matrix : numpy.ndarray, optional
        Training rating matrix (non-zero entries = already rated). When
        given, already-rated training items are excluded from the candidate
        pool. When omitted, ALL items are candidates.
        NOTE(fix): the previous fallback excluded the user's *test* items
        instead — but every relevant item comes from the test set, so that
        made precision and recall identically 0 whenever ratings_matrix
        was not supplied.
    k : int, optional
        Number of recommendations to consider (default: 10).
    threshold : float, optional
        Minimum test rating for an item to count as relevant (default: 3.5).

    Returns:
    --------
    dict
        Dictionary containing precision@k and recall@k values (floats).
    """
    precisions = []
    recalls = []

    n_items = predicted_ratings.shape[1]

    for user in test_data["user_id"].unique():
        user_idx = user - 1  # 0-based row index into the matrices

        user_rows = test_data[test_data["user_id"] == user]
        test_items = user_rows["item_id"].values - 1  # 0-based item indices
        test_ratings = user_rows["rating"].values

        # Relevant = test items the user actually liked.
        relevant_items = test_items[test_ratings >= threshold]
        if len(relevant_items) == 0:
            continue  # Metrics undefined without any relevant items.

        user_predictions = predicted_ratings[user_idx]

        if ratings_matrix is not None:
            # Exclude items the user already rated during training.
            already_rated = np.where(ratings_matrix[user_idx] > 0)[0]
        else:
            # No training information available: treat every item as a
            # candidate. (Excluding the test items here would remove all
            # relevant items from the pool — see docstring NOTE.)
            already_rated = np.array([], dtype=int)

        candidates = np.setdiff1d(np.arange(n_items), already_rated)
        if len(candidates) == 0:
            continue  # User has rated everything; nothing to recommend.

        # Rank candidates by predicted rating (descending) and keep top-k.
        # The list is never empty here, so no further emptiness check needed.
        order = np.argsort(-user_predictions[candidates])
        top_items = candidates[order[: min(k, len(order))]]

        hits = np.intersect1d(top_items, relevant_items)
        precisions.append(len(hits) / len(top_items))
        recalls.append(len(hits) / len(relevant_items))

    # Average over evaluated users; 0.0 if nobody was evaluable.
    avg_precision = float(np.mean(precisions)) if precisions else 0.0
    avg_recall = float(np.mean(recalls)) if recalls else 0.0

    return {f"precision@{k}": avg_precision, f"recall@{k}": avg_recall}


def calculate_f1_score(precision, recall):
    """
    Return the F1 score (harmonic mean of precision and recall).

    Parameters:
    -----------
    precision : float
        Precision value.
    recall : float
        Recall value.

    Returns:
    --------
    float
        F1 score; 0 when both precision and recall are zero (the
        harmonic mean is undefined there).
    """
    denominator = precision + recall
    if denominator == 0:
        return 0
    return 2 * (precision * recall) / denominator


def evaluate_recommendations(
    test_data, predicted_ratings, ratings_matrix=None, k_values=None, threshold=3.5
):
    """
    Perform comprehensive evaluation of recommendation system performance.

    Combines rating-prediction accuracy (RMSE, MAE) with top-k
    recommendation quality (precision, recall, F1) for each requested k.

    Parameters:
    -----------
    test_data : pandas.DataFrame
        Test dataset containing user_id, item_id, and rating columns.
    predicted_ratings : numpy.ndarray
        Matrix of predicted ratings where rows represent users and columns
        represent items.
    ratings_matrix : numpy.ndarray, optional
        Original rating matrix showing which items users have already rated.
        Improves evaluation accuracy by properly identifying unrated items.
    k_values : sequence of int, optional
        k values for precision/recall calculation; defaults to (5, 10).
        The default is expressed as None rather than a list literal so the
        shared default object cannot be mutated across calls (the classic
        mutable-default-argument pitfall).
    threshold : float, optional
        Rating threshold for considering an item relevant (default: 3.5).

    Returns:
    --------
    dict
        Dictionary containing all evaluation metrics:
        - rmse: Root Mean Squared Error
        - mae: Mean Absolute Error
        - precision@k: Precision at k for each k in k_values
        - recall@k: Recall at k for each k in k_values
        - f1@k: F1 score at k for each k in k_values
    """
    if k_values is None:
        k_values = (5, 10)

    # Start from the rating-accuracy metrics ("rmse", "mae"); the helper
    # returns a fresh dict, so it is safe to extend in place.
    results = evaluate_rating_predictions(test_data, predicted_ratings)

    # Add top-k recommendation metrics for each requested cutoff.
    for k in k_values:
        pr_metrics = calculate_precision_recall_at_k(
            test_data,
            predicted_ratings,
            ratings_matrix=ratings_matrix,
            k=k,
            threshold=threshold,
        )

        precision = pr_metrics[f"precision@{k}"]
        recall = pr_metrics[f"recall@{k}"]

        results[f"precision@{k}"] = precision
        results[f"recall@{k}"] = recall
        results[f"f1@{k}"] = calculate_f1_score(precision, recall)

    return results
