#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Rating prediction functions for item-based collaborative filtering recommendation system.

Created on: 2025-04-20
Author: Nianqing Liu
"""

import numpy as np


def simple_weighted_average(ratings_matrix, similarity_matrix, k=10):
    """
    Predict ratings as a similarity-weighted average over a user's rated items.

    For each unrated (user, item) cell, the k most similar already-rated
    items are selected and their ratings are combined, weighted by
    similarity and normalized by the sum of absolute similarities.
    Already-rated cells keep their observed rating; cells with no usable
    neighbors (zero similarity mass) stay 0.

    Parameters:
    -----------
    ratings_matrix : numpy.ndarray
        User-item rating matrix (0 means "not rated").
    similarity_matrix : numpy.ndarray
        Item-item similarity matrix.
    k : int
        Number of similar items to consider (default 10).

    Returns:
    --------
    numpy.ndarray
        Predicted rating matrix.
    """
    n_users, n_items = ratings_matrix.shape
    predictions = np.zeros((n_users, n_items))

    for user in range(n_users):
        row = ratings_matrix[user]
        observed = np.flatnonzero(row > 0)

        # Nothing observed for this user: leave the whole row at 0.
        if observed.size == 0:
            continue

        observed_ratings = row[observed]

        for item in range(n_items):
            # Known ratings pass through unchanged.
            if row[item] > 0:
                predictions[user, item] = row[item]
                continue

            # Similarities between the target item and the user's rated items.
            sims = similarity_matrix[item, observed]

            # Keep only the k most similar neighbors (descending order).
            if len(sims) > k:
                order = np.argsort(sims)[::-1][:k]
                neighbor_sims = sims[order]
                neighbor_ratings = observed_ratings[order]
            else:
                neighbor_sims = sims
                neighbor_ratings = observed_ratings

            # Normalize by absolute similarity mass; skip if it is zero
            # so the prediction stays at 0 rather than dividing by zero.
            weight_mass = np.sum(np.abs(neighbor_sims))
            if weight_mass > 0:
                predictions[user, item] = (
                    np.sum(neighbor_sims * neighbor_ratings) / weight_mass
                )

    return predictions


def weighted_average_with_baseline(ratings_matrix, similarity_matrix, k=10):
    """
    Predict ratings using weighted average with global and user biases.

    Each unrated cell is predicted as ``baseline + weighted deviation``,
    where the baseline is ``global_mean + user_bias + item_bias`` and the
    deviation is a similarity-weighted average of the user's baseline-
    adjusted ratings over the top-k most similar rated items. All
    predictions are clipped to the valid rating range [1, 5].

    Fix: cold-start users (no ratings at all) previously received raw,
    unclipped baseline values — a rating-less user has
    ``user_bias = -global_mean``, which can push the baseline to 0 or
    below. Those predictions are now clipped to [1, 5] like every other
    prediction path.

    Parameters:
    -----------
    ratings_matrix : numpy.ndarray
        User-item rating matrix (0 means "not rated").
    similarity_matrix : numpy.ndarray
        Item-item similarity matrix.
    k : int
        Number of similar items to consider (default 10).

    Returns:
    --------
    numpy.ndarray
        Predicted rating matrix, values in [1, 5] for unrated cells;
        already-rated cells keep their observed rating.
    """
    n_users, n_items = ratings_matrix.shape
    predicted_ratings = np.zeros((n_users, n_items))

    # Global average over observed (non-zero) ratings only.
    rated_mask = ratings_matrix > 0
    global_mean = np.sum(ratings_matrix) / np.sum(rated_mask)

    # User biases: deviation of each user's mean from the global mean.
    user_sums = np.sum(ratings_matrix, axis=1)
    user_counts = np.sum(rated_mask, axis=1)

    # Avoid division by zero for users with no ratings (mean stays 0).
    user_means = np.zeros_like(user_sums, dtype=float)
    nonzero_counts = user_counts > 0
    user_means[nonzero_counts] = user_sums[nonzero_counts] / user_counts[nonzero_counts]
    user_biases = user_means - global_mean

    # Item biases: deviation of each item's mean from the global mean.
    item_sums = np.sum(ratings_matrix, axis=0)
    item_counts = np.sum(rated_mask, axis=0)

    # Avoid division by zero for items with no ratings (mean stays 0).
    item_means = np.zeros_like(item_sums, dtype=float)
    nonzero_counts = item_counts > 0
    item_means[nonzero_counts] = item_sums[nonzero_counts] / item_counts[nonzero_counts]
    item_biases = item_means - global_mean

    for u in range(n_users):
        user_ratings = ratings_matrix[u]
        rated_items = np.where(user_ratings > 0)[0]

        # Cold-start user: fall back to the baseline for every item,
        # clipped to the valid range (this was previously unclipped).
        if len(rated_items) == 0:
            predicted_ratings[u] = np.clip(
                global_mean + user_biases[u] + item_biases, 1, 5
            )
            continue

        user_bias = user_biases[u]

        for i in range(n_items):
            # Known ratings pass through unchanged.
            if user_ratings[i] > 0:
                predicted_ratings[u, i] = user_ratings[i]
                continue

            item_bias = item_biases[i]

            # Baseline prediction for this (user, item) pair.
            baseline = global_mean + user_bias + item_bias

            # Similarities between this item and the user's rated items.
            item_similarities = similarity_matrix[i, rated_items]
            user_rated_items = user_ratings[rated_items]

            # Remove each rated item's own baseline so only the residual
            # preference signal is averaged.
            adjusted_ratings = user_rated_items - (
                global_mean + user_bias + item_biases[rated_items]
            )

            # Restrict to the top-k most similar rated items.
            if k < len(item_similarities):
                top_indices = np.argsort(item_similarities)[::-1][:k]
                top_similarities = item_similarities[top_indices]
                top_adjusted_ratings = adjusted_ratings[top_indices]
            else:
                top_similarities = item_similarities
                top_adjusted_ratings = adjusted_ratings

            # Normalize by absolute similarity mass.
            sim_sum = np.sum(np.abs(top_similarities))

            if sim_sum > 0:
                weighted_deviation = (
                    np.sum(top_similarities * top_adjusted_ratings) / sim_sum
                )
                predicted_rating = baseline + weighted_deviation
                # Clip the predicted rating to the valid range [1, 5].
                predicted_ratings[u, i] = max(1, min(5, predicted_rating))
            else:
                # No usable neighbors: fall back to the clipped baseline.
                predicted_ratings[u, i] = max(1, min(5, baseline))

    return predicted_ratings


def predict_ratings(
    ratings_matrix, similarity_matrix, method="weighted_average_with_baseline", k=10
):
    """
    Predict ratings using the specified method.

    Dispatches to one of the available predictors by name.

    Parameters:
    -----------
    ratings_matrix : numpy.ndarray
        User-item rating matrix.
    similarity_matrix : numpy.ndarray
        Item-item similarity matrix.
    method : str
        Prediction method: 'simple_weighted' or 'weighted_average_with_baseline'.
    k : int
        Number of similar items to consider.

    Returns:
    --------
    numpy.ndarray
        Predicted rating matrix.

    Raises:
    -------
    ValueError
        If ``method`` names an unknown predictor.
    """
    predictors = {
        "simple_weighted": simple_weighted_average,
        "weighted_average_with_baseline": weighted_average_with_baseline,
    }
    if method not in predictors:
        raise ValueError(
            f"Unknown prediction method: {method}. Choose from 'simple_weighted' or 'weighted_average_with_baseline'."
        )
    return predictors[method](ratings_matrix, similarity_matrix, k)
