#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Complete recommendation model integrating all components.
With added logging functionality.

Created on: 2025-04-18
Author: Nianqing Liu
"""

import os
import numpy as np
import pandas as pd
import time
from .similarity import calculate_similarity
from .prediction import predict_ratings
from .recommendation import recommend_items, get_popular_items
from .evaluation import evaluate_recommendations
from .logger import get_logger


class UserBasedCF:
    """
    User-Based Collaborative Filtering Recommendation System.

    This class implements a complete recommendation system using user-based
    collaborative filtering. It identifies similar users based on rating
    patterns and recommends items that similar users liked but the target user
    hasn't experienced yet.

    Notes
    -----
    User and item IDs are assumed to be contiguous, 1-based positive integers:
    matrix dimensions are derived from ``max(user_id)`` / ``max(item_id)``.
    Sparse or 0-based ID spaces would waste memory or mis-index — confirm
    against the data loader.
    """

    def __init__(
        self, similarity_method="cosine", prediction_method="bias_weighted", k=30
    ):
        """
        Initialize the UserBasedCF model.

        Parameters:
        -----------
        similarity_method : str
            Method to calculate user similarity: 'cosine', 'pearson', 'euclidean', 'adjusted_cosine'
        prediction_method : str
            Method to predict ratings: 'simple_weighted', 'bias_weighted'
        k : int
            Number of similar users to consider for predictions
        """
        self.logger = get_logger()
        self.logger.info(
            f"Initializing UserBasedCF model with similarity_method={similarity_method}, "
            f"prediction_method={prediction_method}, k={k}"
        )

        self.similarity_method = similarity_method
        self.prediction_method = prediction_method
        self.k = k

        # Data attributes populated by fit()
        self.ratings_df = None  # full ratings DataFrame as passed to fit()
        self.movies_df = None  # optional item metadata for recommend()
        self.train_data = None  # training split of ratings_df
        self.test_data = None  # held-out split (None when test_size == 0)
        self.ratings_matrix = None  # dense (n_users, n_items) observed ratings
        self.user_similarity = None  # (n_users, n_users) similarity matrix
        self.predicted_ratings = None  # (n_users, n_items) predicted ratings
        self.n_users = None  # number of users (== max user_id)
        self.n_items = None  # number of items (== max item_id)

        # Guards recommend()/evaluate()/save_model() against use before fit()
        self.is_trained = False

    def fit(self, ratings_df, movies_df=None, test_size=0.2, random_state=42):
        """
        Train the recommendation model.

        Parameters:
        -----------
        ratings_df : pandas.DataFrame
            DataFrame containing rating data with columns 'user_id', 'item_id', 'rating'
        movies_df : pandas.DataFrame, optional
            DataFrame containing movie information with at least 'movie_id' and 'movie_title'
        test_size : float, optional
            Fraction of data to use for testing, default 0.2. Use 0 to train
            on all data (evaluate() then becomes unavailable).
        random_state : int, optional
            Random seed for train/test split, default 42

        Returns:
        --------
        self : UserBasedCF
            Trained model instance (enables fluent chaining)
        """
        # Imported lazily so the module loads even without scikit-learn.
        from sklearn.model_selection import train_test_split

        self.logger.info("Starting model training")
        train_start = time.time()

        # Store data
        self.ratings_df = ratings_df
        self.movies_df = movies_df

        # Dimensions: IDs are assumed 1-based and contiguous, so max == count.
        # Cast to plain int (pandas .max() yields a numpy scalar).
        self.n_users = int(ratings_df["user_id"].max())
        self.n_items = int(ratings_df["item_id"].max())

        self.logger.info(
            f"Training model with {self.n_users} users and {self.n_items} items"
        )

        # Split data if test_size > 0
        if test_size > 0:
            self.logger.info(
                f"Splitting data with test_size={test_size}, random_state={random_state}"
            )
            split_start = time.time()
            self.train_data, self.test_data = train_test_split(
                ratings_df, test_size=test_size, random_state=random_state
            )
            split_time = time.time() - split_start

            self.logger.info(
                f"Split data in {split_time:.2f}s: {len(self.train_data)} training and {len(self.test_data)} testing samples"
            )
        else:
            self.train_data = ratings_df
            self.test_data = None
            self.logger.info("Using all data for training (no test split)")

        # Create dense rating matrix from training data; unrated cells stay 0,
        # which downstream code treats as "no rating".
        self.logger.info("Creating rating matrix from training data")
        matrix_start = time.time()
        self.ratings_matrix = np.zeros((self.n_users, self.n_items))

        for row in self.train_data.itertuples():
            # Shift 1-based IDs to 0-based matrix indices
            user_idx = row.user_id - 1
            item_idx = row.item_id - 1
            self.ratings_matrix[user_idx, item_idx] = row.rating

        matrix_time = time.time() - matrix_start
        self.logger.info(f"Rating matrix created in {matrix_time:.2f}s")

        # Matrix density = observed ratings / total cells (diagnostic only)
        n_ratings = np.sum(self.ratings_matrix > 0)
        density = n_ratings / (self.n_users * self.n_items)
        self.logger.info(f"Rating matrix density: {density:.6f} ({n_ratings} ratings)")

        # Calculate user-user similarity matrix
        self.logger.info(
            f"Computing user similarities using {self.similarity_method} method"
        )
        sim_start = time.time()
        self.user_similarity = calculate_similarity(
            self.ratings_matrix, method=self.similarity_method
        )
        sim_time = time.time() - sim_start
        self.logger.info(f"User similarities computed in {sim_time:.2f}s")

        # Predict ratings for every (user, item) pair from the k nearest users
        self.logger.info(
            f"Predicting ratings using {self.prediction_method} method with k={self.k}"
        )
        pred_start = time.time()
        self.predicted_ratings = predict_ratings(
            self.ratings_matrix,
            self.user_similarity,
            method=self.prediction_method,
            k=self.k,
        )
        pred_time = time.time() - pred_start
        self.logger.info(f"Ratings predicted in {pred_time:.2f}s")

        # Mark model as trained
        self.is_trained = True

        total_time = time.time() - train_start
        self.logger.info(f"Model training completed in {total_time:.2f}s")
        return self

    def recommend(self, user_id, top_n=10):
        """
        Generate recommendations for a specific user.

        Parameters:
        -----------
        user_id : int
            ID of the user to get recommendations for (1-based)
        top_n : int, optional
            Number of recommendations to generate, default 10

        Returns:
        --------
        pandas.DataFrame
            DataFrame with recommended items, predicted ratings, and item info

        Raises:
        -------
        ValueError
            If the model is untrained, movies_df was not provided during
            fit(), or user_id is out of range.
        """
        self.logger.info(f"Generating {top_n} recommendations for user {user_id}")

        if not self.is_trained:
            error_msg = (
                "Model not trained. Call fit() before generating recommendations."
            )
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        if self.movies_df is None:
            error_msg = (
                "Movie information not available. Provide movies_df during fit()."
            )
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        # Check if user_id is valid (IDs are 1-based)
        if user_id < 1 or user_id > self.n_users:
            error_msg = f"Invalid user_id. Must be between 1 and {self.n_users}"
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        # Generate recommendations from predicted ratings of unrated items
        rec_start = time.time()
        recommendations = recommend_items(
            user_id,
            self.ratings_matrix,
            self.predicted_ratings,
            self.movies_df,
            top_n=top_n,
        )
        rec_time = time.time() - rec_start

        # If no recommendations (user has rated all items), fall back to
        # globally popular items rather than returning an empty frame.
        if len(recommendations) == 0:
            self.logger.warning(
                f"User {user_id} has rated all items. Returning popular items instead."
            )
            pop_start = time.time()
            recommendations = get_popular_items(
                self.ratings_matrix, self.movies_df, top_n=top_n
            )
            pop_time = time.time() - pop_start
            self.logger.info(
                f"Generated {len(recommendations)} popular item recommendations in {pop_time:.2f}s"
            )
        else:
            self.logger.info(
                f"Generated {len(recommendations)} recommendations in {rec_time:.2f}s"
            )

            # Log a few top recommendations for traceability
            if not recommendations.empty:
                top_3 = recommendations.head(3)[
                    ["movie_title", "predicted_rating"]
                ].to_dict(orient="records")
                self.logger.info(f"Top 3 recommendations: {top_3}")

        return recommendations

    def evaluate(self, k_values=None, threshold=3.5):
        """
        Evaluate the model's performance on the held-out test split.

        Parameters:
        -----------
        k_values : list, optional
            List of k values for precision/recall calculation, default [5, 10]
        threshold : float, optional
            Rating threshold for relevance in precision/recall, default 3.5

        Returns:
        --------
        dict
            Dictionary containing evaluation metrics (name -> value)

        Raises:
        -------
        ValueError
            If the model is untrained or no test data is available.
        """
        # None sentinel instead of a mutable default list: a `k_values=[5, 10]`
        # default would be shared across calls and could be mutated downstream.
        if k_values is None:
            k_values = [5, 10]

        self.logger.info(
            f"Evaluating model with k_values={k_values}, threshold={threshold}"
        )

        if not self.is_trained:
            error_msg = "Model not trained. Call fit() before evaluation."
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        if self.test_data is None:
            error_msg = "No test data available. Set test_size > 0 during fit()."
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        # Evaluate recommendations against the held-out ratings
        eval_start = time.time()
        results = evaluate_recommendations(
            self.test_data,
            self.predicted_ratings,
            ratings_matrix=self.ratings_matrix,
            k_values=k_values,
            threshold=threshold,
        )
        eval_time = time.time() - eval_start

        self.logger.info(f"Evaluation completed in {eval_time:.2f}s")

        # Log evaluation metrics (assumes numeric values — holds for the
        # metrics evaluate_recommendations returns)
        for metric, value in results.items():
            self.logger.info(f"{metric}: {value:.4f}")

        return results

    def get_similar_users(self, user_id, top_n=10):
        """
        Find users similar to the specified user.

        Parameters:
        -----------
        user_id : int
            ID of the reference user (1-based)
        top_n : int, optional
            Number of similar users to return, default 10

        Returns:
        --------
        pandas.DataFrame
            DataFrame with similar users and similarity scores, sorted by
            descending similarity

        Raises:
        -------
        ValueError
            If the model is untrained or user_id is out of range.
        """
        self.logger.info(f"Finding {top_n} users similar to user {user_id}")

        if not self.is_trained:
            error_msg = "Model not trained. Call fit() before getting similar users."
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        # Adjust for 0-based indexing
        user_idx = user_id - 1

        # Check if user_id is valid
        if user_idx < 0 or user_idx >= self.n_users:
            error_msg = f"Invalid user_id. Must be between 1 and {self.n_users}"
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        # Copy the similarity row: indexing returns a view, and writing the
        # self-similarity sentinel into a view would permanently corrupt
        # self.user_similarity across calls.
        user_similarities = self.user_similarity[user_idx].copy()

        # Set self-similarity to -1 to exclude the user from their own results
        user_similarities[user_idx] = -1

        # Get indices of the top_n most similar users (descending similarity)
        similar_user_indices = np.argsort(-user_similarities)[:top_n]
        similarities = user_similarities[similar_user_indices]

        # Convert to 1-based user IDs
        similar_user_ids = similar_user_indices + 1

        # Create DataFrame
        similar_users = pd.DataFrame(
            {"user_id": similar_user_ids, "similarity": similarities}
        )

        self.logger.info(f"Found {len(similar_users)} similar users")

        # Log a few top similar users
        if not similar_users.empty:
            top_3 = similar_users.head(3).to_dict(orient="records")
            self.logger.info(f"Top 3 similar users: {top_3}")

        return similar_users

    def save_model(self, filepath):
        """
        Save the trained model to a file (pickle format).

        Note: the raw DataFrames (ratings_df, train_data, test_data,
        movies_df) are NOT persisted — only the learned matrices and
        hyperparameters.

        Parameters:
        -----------
        filepath : str
            Path to save the model to

        Raises:
        -------
        ValueError
            If the model has not been trained.
        """
        if not self.is_trained:
            error_msg = "Model not trained. Call fit() before saving."
            self.logger.error(error_msg)
            raise ValueError(error_msg)

        self.logger.info(f"Saving model to {filepath}")

        # Create the parent directory if needed. Guard against a bare
        # filename: os.path.dirname("model.pkl") == "" and makedirs("")
        # raises FileNotFoundError.
        parent_dir = os.path.dirname(filepath)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)

        import pickle

        model_data = {
            "similarity_method": self.similarity_method,
            "prediction_method": self.prediction_method,
            "k": self.k,
            "ratings_matrix": self.ratings_matrix,
            "user_similarity": self.user_similarity,
            "predicted_ratings": self.predicted_ratings,
            "n_users": self.n_users,
            "n_items": self.n_items,
            "is_trained": self.is_trained,
        }

        save_start = time.time()
        with open(filepath, "wb") as f:
            pickle.dump(model_data, f)
        save_time = time.time() - save_start

        # Calculate file size for the log message
        file_size = os.path.getsize(filepath) / (1024 * 1024)  # in MB
        self.logger.info(
            f"Model saved to {filepath} ({file_size:.2f} MB) in {save_time:.2f}s"
        )

    @classmethod
    def load_model(cls, filepath, movies_df=None):
        """
        Load a trained model from a file.

        Security note: this uses pickle, which can execute arbitrary code —
        only load model files from trusted sources.

        The raw rating DataFrames are not stored in the file, so evaluate()
        is unavailable on a loaded model; recommend() works when movies_df
        is supplied here.

        Parameters:
        -----------
        filepath : str
            Path to load the model from
        movies_df : pandas.DataFrame, optional
            DataFrame containing movie information

        Returns:
        --------
        UserBasedCF
            Loaded model instance

        Raises:
        -------
        FileNotFoundError
            If the model file does not exist.
        """
        logger = get_logger()
        logger.info(f"Loading model from {filepath}")

        if not os.path.exists(filepath):
            error_msg = f"Model file {filepath} not found"
            logger.error(error_msg)
            raise FileNotFoundError(error_msg)

        import pickle

        load_start = time.time()
        with open(filepath, "rb") as f:
            model_data = pickle.load(f)
        load_time = time.time() - load_start

        # Create new instance with the saved hyperparameters
        model = cls(
            similarity_method=model_data["similarity_method"],
            prediction_method=model_data["prediction_method"],
            k=model_data["k"],
        )

        # Restore learned state
        model.ratings_matrix = model_data["ratings_matrix"]
        model.user_similarity = model_data["user_similarity"]
        model.predicted_ratings = model_data["predicted_ratings"]
        model.n_users = model_data["n_users"]
        model.n_items = model_data["n_items"]
        model.is_trained = model_data["is_trained"]
        model.movies_df = movies_df

        # Calculate file size for the log message
        file_size = os.path.getsize(filepath) / (1024 * 1024)  # in MB
        logger.info(
            f"Model loaded from {filepath} ({file_size:.2f} MB) in {load_time:.2f}s"
        )
        logger.info(f"Model dimensions: {model.n_users} users, {model.n_items} items")

        return model
