#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Complete recommendation model integrating all components of the item-based collaborative filtering system.
With added logging functionality.

Created on: 2025-04-20
Author: Nianqing Liu
"""

import os
import numpy as np
import pandas as pd
import time
from .similarity import calculate_similarity
from .prediction import predict_ratings
from .recommendation import (
    recommend_items,
    get_popular_items,
    recommend_similar_items,
    get_top_rated_items_for_user,
)
from .evaluation import evaluate_recommendations
from .logger import get_logger


class ItemBasedCF:
    """
    Item-Based Collaborative Filtering Recommendation System.

    This class implements a complete recommendation system using item-based
    collaborative filtering algorithm. It identifies similar items based on rating
    patterns and recommends items similar to those the user has rated highly.
    """

    def __init__(
        self,
        similarity_method="adjusted_cosine",
        prediction_method="weighted_average_with_baseline",
        k=30,
    ):
        """
        Initialize the ItemBasedCF model.

        Parameters:
        -----------
        similarity_method : str
            Method to calculate item similarity: 'cosine', 'pearson', 'euclidean', 'adjusted_cosine'
        prediction_method : str
            Method to predict ratings: 'simple_weighted', 'weighted_average_with_baseline'
        k : int
            Number of similar items to consider for predictions
        """
        self.logger = get_logger()
        self.logger.info(
            f"Initializing ItemBasedCF model with similarity_method={similarity_method}, "
            f"prediction_method={prediction_method}, k={k}"
        )

        self.similarity_method = similarity_method
        self.prediction_method = prediction_method
        self.k = k

        # Initialize data attributes
        self.ratings_df = None
        self.movies_df = None
        self.train_data = None
        self.test_data = None
        self.ratings_matrix = None
        self.item_similarity = None
        self.predicted_ratings = None
        self.n_users = None
        self.n_items = None
        self.user_means = None  # Per-user mean ratings, needed for adjusted cosine

        # Track if model is trained
        self.is_trained = False

    # ------------------------------------------------------------------
    # Internal validation helpers (shared by all public methods so the
    # error messages stay consistent in one place).
    # ------------------------------------------------------------------

    def _check_trained(self, action):
        """Raise ValueError (after logging) if fit() has not been called yet."""
        if not self.is_trained:
            error_msg = f"Model not trained. Call fit() before {action}."
            self.logger.error(error_msg)
            raise ValueError(error_msg)

    def _check_movies(self):
        """Raise ValueError (after logging) if no movie metadata was provided."""
        if self.movies_df is None:
            error_msg = (
                "Movie information not available. Provide movies_df during fit()."
            )
            self.logger.error(error_msg)
            raise ValueError(error_msg)

    def _check_user_id(self, user_id):
        """Raise ValueError (after logging) for a user_id outside [1, n_users]."""
        if user_id < 1 or user_id > self.n_users:
            error_msg = f"Invalid user_id. Must be between 1 and {self.n_users}"
            self.logger.error(error_msg)
            raise ValueError(error_msg)

    def _check_item_id(self, item_id):
        """Raise ValueError (after logging) for an item_id outside [1, n_items]."""
        if item_id < 1 or item_id > self.n_items:
            error_msg = f"Invalid item_id. Must be between 1 and {self.n_items}"
            self.logger.error(error_msg)
            raise ValueError(error_msg)

    def fit(self, ratings_df, movies_df=None, test_size=0.2, random_state=42):
        """
        Train the recommendation model.

        Parameters:
        -----------
        ratings_df : pandas.DataFrame
            DataFrame containing rating data with columns 'user_id', 'item_id', 'rating'
        movies_df : pandas.DataFrame, optional
            DataFrame containing movie information with at least 'movie_id' and 'movie_title'
        test_size : float, optional
            Fraction of data to use for testing, default 0.2
        random_state : int, optional
            Random seed for train/test split, default 42

        Returns:
        --------
        self : ItemBasedCF
            Trained model instance
        """
        from sklearn.model_selection import train_test_split

        self.logger.info("Starting model training")
        train_start = time.time()

        # Store data
        self.ratings_df = ratings_df
        self.movies_df = movies_df

        # Get dimensions. IDs are assumed 1-based and dense, so the max ID is
        # the count. Cast to int so downstream shapes are plain Python ints.
        self.n_users = int(ratings_df["user_id"].max())
        self.n_items = int(ratings_df["item_id"].max())

        self.logger.info(
            f"Training model with {self.n_users} users and {self.n_items} items"
        )

        # Split data if test_size > 0
        if test_size > 0:
            self.logger.info(
                f"Splitting data with test_size={test_size}, random_state={random_state}"
            )
            split_start = time.time()
            self.train_data, self.test_data = train_test_split(
                ratings_df, test_size=test_size, random_state=random_state
            )
            split_time = time.time() - split_start

            self.logger.info(
                f"Split data in {split_time:.2f}s: {len(self.train_data)} training and {len(self.test_data)} testing samples"
            )
        else:
            self.train_data = ratings_df
            self.test_data = None
            self.logger.info("Using all data for training (no test split)")

        # Create rating matrix from training data
        self.logger.info("Creating rating matrix from training data")
        matrix_start = time.time()
        self.ratings_matrix = np.zeros((self.n_users, self.n_items))

        # Vectorized fill: adjust IDs to 0-based indices and assign in one
        # shot. With duplicate (user, item) pairs the last row wins, matching
        # the behavior of a sequential per-row loop.
        user_idx = self.train_data["user_id"].to_numpy() - 1
        item_idx = self.train_data["item_id"].to_numpy() - 1
        self.ratings_matrix[user_idx, item_idx] = self.train_data[
            "rating"
        ].to_numpy()

        matrix_time = time.time() - matrix_start
        self.logger.info(f"Rating matrix created in {matrix_time:.2f}s")

        # Calculate matrix density
        n_ratings = np.sum(self.ratings_matrix > 0)
        density = n_ratings / (self.n_users * self.n_items)
        self.logger.info(f"Rating matrix density: {density:.6f} ({n_ratings} ratings)")

        # Calculate user means for adjusted cosine (mean over rated items
        # only; users with no ratings keep a mean of 0 to avoid divide-by-zero)
        if self.similarity_method == "adjusted_cosine":
            self.logger.info("Calculating user means for adjusted cosine similarity")
            rated_mask = self.ratings_matrix > 0
            user_sums = np.sum(self.ratings_matrix, axis=1)
            user_counts = np.sum(rated_mask, axis=1)
            self.user_means = np.zeros_like(user_sums, dtype=float)
            nonzero_counts = user_counts > 0
            self.user_means[nonzero_counts] = (
                user_sums[nonzero_counts] / user_counts[nonzero_counts]
            )

        # Calculate item similarity
        self.logger.info(
            f"Computing item similarities using {self.similarity_method} method"
        )
        sim_start = time.time()
        self.item_similarity = calculate_similarity(
            self.ratings_matrix,
            method=self.similarity_method,
            user_means=self.user_means,
        )
        sim_time = time.time() - sim_start
        self.logger.info(f"Item similarities computed in {sim_time:.2f}s")

        # Predict ratings
        self.logger.info(
            f"Predicting ratings using {self.prediction_method} method with k={self.k}"
        )
        pred_start = time.time()
        self.predicted_ratings = predict_ratings(
            self.ratings_matrix,
            self.item_similarity,
            method=self.prediction_method,
            k=self.k,
        )
        pred_time = time.time() - pred_start
        self.logger.info(f"Ratings predicted in {pred_time:.2f}s")

        # Mark model as trained
        self.is_trained = True

        total_time = time.time() - train_start
        self.logger.info(f"Model training completed in {total_time:.2f}s")
        return self

    def recommend(self, user_id, top_n=10):
        """
        Generate recommendations for a specific user.

        Parameters:
        -----------
        user_id : int
            ID of the user to get recommendations for (1-based)
        top_n : int, optional
            Number of recommendations to generate, default 10

        Returns:
        --------
        pandas.DataFrame
            DataFrame with recommended items, predicted ratings, and item info

        Raises:
        -------
        ValueError
            If the model is untrained, movies_df is missing, or user_id is
            out of range.
        """
        self.logger.info(f"Generating {top_n} recommendations for user {user_id}")

        self._check_trained("generating recommendations")
        self._check_movies()
        self._check_user_id(user_id)

        # Generate recommendations
        rec_start = time.time()
        recommendations = recommend_items(
            user_id,
            self.ratings_matrix,
            self.predicted_ratings,
            self.movies_df,
            top_n=top_n,
        )
        rec_time = time.time() - rec_start

        # If no recommendations (user has rated all items), return popular items
        if len(recommendations) == 0:
            self.logger.warning(
                f"User {user_id} has rated all items. Returning popular items instead."
            )
            pop_start = time.time()
            recommendations = get_popular_items(
                self.ratings_matrix, self.movies_df, top_n=top_n
            )
            pop_time = time.time() - pop_start
            self.logger.info(
                f"Generated {len(recommendations)} popular item recommendations in {pop_time:.2f}s"
            )
        else:
            self.logger.info(
                f"Generated {len(recommendations)} recommendations in {rec_time:.2f}s"
            )

            # Log a few top recommendations
            if not recommendations.empty:
                top_3 = recommendations.head(3)[
                    ["movie_title", "predicted_rating"]
                ].to_dict(orient="records")
                self.logger.info(f"Top 3 recommendations: {top_3}")

        return recommendations

    def evaluate(self, k_values=None, threshold=3.5):
        """
        Evaluate the model's performance.

        Parameters:
        -----------
        k_values : list, optional
            List of k values for precision/recall calculation, default [5, 10]
        threshold : float, optional
            Rating threshold for relevance in precision/recall, default 3.5

        Returns:
        --------
        dict
            Dictionary containing evaluation metrics

        Raises:
        -------
        ValueError
            If the model is untrained or no test split was held out.
        """
        # None sentinel instead of a mutable default argument; the effective
        # default is unchanged.
        if k_values is None:
            k_values = [5, 10]

        self.logger.info(
            f"Evaluating model with k_values={k_values}, threshold={threshold}"
        )

        self._check_trained("evaluation")

        if self.test_data is None:
            error_msg = "No test data available. Set test_size > 0 during fit()."
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        # Evaluate recommendations
        eval_start = time.time()
        results = evaluate_recommendations(
            self.test_data,
            self.predicted_ratings,
            ratings_matrix=self.ratings_matrix,
            k_values=k_values,
            threshold=threshold,
        )
        eval_time = time.time() - eval_start

        self.logger.info(f"Evaluation completed in {eval_time:.2f}s")

        # Log evaluation metrics
        for metric, value in results.items():
            self.logger.info(f"{metric}: {value:.4f}")

        return results

    def get_similar_items(self, item_id, top_n=10):
        """
        Find items similar to the specified item.

        Parameters:
        -----------
        item_id : int
            ID of the reference item (1-based)
        top_n : int, optional
            Number of similar items to return, default 10

        Returns:
        --------
        pandas.DataFrame
            DataFrame with similar items and similarity scores

        Raises:
        -------
        ValueError
            If the model is untrained, movies_df is missing, or item_id is
            out of range.
        """
        self.logger.info(f"Finding {top_n} items similar to item {item_id}")

        self._check_trained("getting similar items")
        self._check_movies()
        self._check_item_id(item_id)

        # Find similar items
        similar_items = recommend_similar_items(
            item_id, self.item_similarity, self.movies_df, top_n=top_n
        )

        self.logger.info(f"Found {len(similar_items)} similar items")

        # Log a few top similar items
        if not similar_items.empty:
            top_3 = similar_items.head(3).to_dict(orient="records")
            self.logger.info(f"Top 3 similar items: {top_3}")

        return similar_items

    def get_user_top_rated(self, user_id, min_rating=4.0, top_n=5):
        """
        Get the top-rated items for a specific user.

        Parameters:
        -----------
        user_id : int
            ID of the user (1-based)
        min_rating : float, optional
            Minimum rating to consider an item as liked, default 4.0
        top_n : int, optional
            Number of top-rated items to return, default 5

        Returns:
        --------
        pandas.DataFrame
            DataFrame with the user's top-rated items

        Raises:
        -------
        ValueError
            If the model is untrained, movies_df is missing, or user_id is
            out of range.
        """
        self.logger.info(f"Getting top {top_n} rated items for user {user_id}")

        self._check_trained("getting top-rated items")
        self._check_movies()
        self._check_user_id(user_id)

        # Get top-rated items
        top_rated = get_top_rated_items_for_user(
            user_id, self.ratings_matrix, self.movies_df, min_rating, top_n
        )

        self.logger.info(f"Found {len(top_rated)} top-rated items for user {user_id}")

        return top_rated

    def more_like_this(self, user_id, item_id, top_n=10):
        """
        Recommend items that are similar to a specific item and likely to be enjoyed by the user.
        This is a hybrid approach combining item similarity with user preferences.

        Parameters:
        -----------
        user_id : int
            ID of the user (1-based)
        item_id : int
            ID of the reference item (1-based)
        top_n : int, optional
            Number of recommendations to generate, default 10

        Returns:
        --------
        pandas.DataFrame
            DataFrame with recommended items

        Raises:
        -------
        ValueError
            If the model is untrained, movies_df is missing, or either ID is
            out of range.
        """
        self.logger.info(f"Finding items similar to {item_id} for user {user_id}")

        self._check_trained("recommendations")
        # movies_df is used directly below (merge), so check it up front
        # instead of relying on get_similar_items to fail later.
        self._check_movies()
        self._check_user_id(user_id)
        self._check_item_id(item_id)

        # Get similar items (over-fetch so filtering rated items still leaves
        # enough candidates)
        similar_items = self.get_similar_items(item_id, top_n=top_n * 2)

        # Adjust user_id to 0-based indexing
        user_idx = user_id - 1

        # Get user's ratings
        user_ratings = self.ratings_matrix[user_idx]
        user_predictions = self.predicted_ratings[user_idx]

        # Convert movie_ids to 0-based indices
        item_indices = similar_items["movie_id"].values - 1

        # Filter out items the user has already rated
        unrated_mask = user_ratings[item_indices] == 0
        unrated_indices = item_indices[unrated_mask]

        if len(unrated_indices) == 0:
            self.logger.warning(
                f"User {user_id} has already rated all similar items to {item_id}"
            )
            return pd.DataFrame(
                columns=["movie_id", "movie_title", "predicted_rating", "similarity"]
            )

        # Get predicted ratings for these items
        predicted_ratings = user_predictions[unrated_indices]

        # Get similarities to reference item (aligned with unrated_mask)
        similarities = similar_items["similarity"].values[unrated_mask]

        # Calculate a combined score (normalize both values to 0-1 range)
        max_rating = 5.0
        combined_scores = (0.6 * (predicted_ratings / max_rating)) + (
            0.4 * similarities
        )

        # Sort by combined score and take top_n
        top_indices = np.argsort(-combined_scores)[: min(top_n, len(combined_scores))]

        # Create recommendation DataFrame
        recommendations = pd.DataFrame(
            {
                "movie_id": unrated_indices[top_indices] + 1,  # Convert back to 1-based
                "predicted_rating": predicted_ratings[top_indices],
                "similarity": similarities[top_indices],
                "combined_score": combined_scores[top_indices],
            }
        )

        # Add movie information
        recommendations = recommendations.merge(
            self.movies_df[["movie_id", "movie_title"]], on="movie_id"
        )

        # Sort by combined score
        recommendations = recommendations.sort_values("combined_score", ascending=False)

        self.logger.info(
            f"Found {len(recommendations)} 'more like this' recommendations"
        )

        return recommendations

    def save_model(self, filepath):
        """
        Save the trained model to a file.

        Parameters:
        -----------
        filepath : str
            Path to save the model to

        Raises:
        -------
        ValueError
            If the model has not been trained yet.
        """
        self._check_trained("saving")

        self.logger.info(f"Saving model to {filepath}")

        # Create the parent directory only when the path actually has one;
        # os.makedirs("") raises FileNotFoundError for bare filenames.
        directory = os.path.dirname(filepath)
        if directory:
            os.makedirs(directory, exist_ok=True)

        import pickle

        model_data = {
            "similarity_method": self.similarity_method,
            "prediction_method": self.prediction_method,
            "k": self.k,
            "ratings_matrix": self.ratings_matrix,
            "item_similarity": self.item_similarity,
            "predicted_ratings": self.predicted_ratings,
            "n_users": self.n_users,
            "n_items": self.n_items,
            "user_means": self.user_means,
            "is_trained": self.is_trained,
        }

        save_start = time.time()
        with open(filepath, "wb") as f:
            pickle.dump(model_data, f)
        save_time = time.time() - save_start

        # Calculate file size
        file_size = os.path.getsize(filepath) / (1024 * 1024)  # in MB
        self.logger.info(
            f"Model saved to {filepath} ({file_size:.2f} MB) in {save_time:.2f}s"
        )

    @classmethod
    def load_model(cls, filepath, movies_df=None):
        """
        Load a trained model from a file.

        Parameters:
        -----------
        filepath : str
            Path to load the model from
        movies_df : pandas.DataFrame, optional
            DataFrame containing movie information

        Returns:
        --------
        ItemBasedCF
            Loaded model instance

        Raises:
        -------
        FileNotFoundError
            If no file exists at filepath.
        """
        logger = get_logger()
        logger.info(f"Loading model from {filepath}")

        if not os.path.exists(filepath):
            error_msg = f"Model file {filepath} not found"
            logger.error(error_msg)
            raise FileNotFoundError(error_msg)

        import pickle

        # NOTE: pickle.load is only safe on trusted model files; do not load
        # models from untrusted sources.
        load_start = time.time()
        with open(filepath, "rb") as f:
            model_data = pickle.load(f)
        load_time = time.time() - load_start

        # Create new instance
        model = cls(
            similarity_method=model_data["similarity_method"],
            prediction_method=model_data["prediction_method"],
            k=model_data["k"],
        )

        # Restore model attributes
        model.ratings_matrix = model_data["ratings_matrix"]
        model.item_similarity = model_data["item_similarity"]
        model.predicted_ratings = model_data["predicted_ratings"]
        model.n_users = model_data["n_users"]
        model.n_items = model_data["n_items"]
        model.user_means = model_data.get(
            "user_means", None
        )  # Handle backward compatibility
        model.is_trained = model_data["is_trained"]
        model.movies_df = movies_df

        # Calculate file size
        file_size = os.path.getsize(filepath) / (1024 * 1024)  # in MB
        logger.info(
            f"Model loaded from {filepath} ({file_size:.2f} MB) in {load_time:.2f}s"
        )
        logger.info(f"Model dimensions: {model.n_users} users, {model.n_items} items")

        return model
