#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Command line entry program for Item-Based Collaborative Filtering Recommendation System.
With added logging functionality.

Created on: 2025-04-20
Author: Nianqing Liu
"""

import argparse
import logging
import os
import time
import traceback

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from src.data_loader import DataLoader
from src.logger import setup_logger
from src.model import ItemBasedCF


def parse_args():
    """Build the CLI parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description="Item-Based Collaborative Filtering Recommendation System"
    )
    add = parser.add_argument  # shorthand: every option below goes on `parser`

    # Data parameters
    add("--data_path", type=str, default="data/ml-100k",
        help="Path to the MovieLens dataset directory")

    # Model parameters
    add("--similarity", type=str, default="adjusted_cosine",
        choices=["cosine", "pearson", "euclidean", "adjusted_cosine"],
        help="Similarity measure to use")
    add("--prediction", type=str, default="weighted_average_with_baseline",
        choices=["simple_weighted", "weighted_average_with_baseline"],
        help="Prediction method to use")
    add("--k", type=int, default=30,
        help="Number of similar items to consider")

    # Output directory parameters
    add("--output_dir", type=str, default="results",
        help="Directory to save visualization outputs")
    add("--log_dir", type=str, default="results/logs",
        help="Directory to save log files")
    add("--log_level", type=str, default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Logging level")

    # Recommendation parameters
    add("--user_id", type=int, required=False,
        help="User ID to generate recommendations for")
    add("--item_id", type=int, required=False,
        help="Item ID to find similar items for")
    add("--num_recommendations", type=int, default=10,
        help="Number of recommendations to generate")

    # Evaluation parameters
    add("--test_size", type=float, default=0.2,
        help="Fraction of data to use for testing")
    add("--evaluate", action="store_true", help="Evaluate the model")

    # Visualization parameters
    add("--visualize", action="store_true",
        help="Visualize item similarities and recommendations")

    # Model saving/loading
    add("--save_model", type=str, default=None,
        help="Path to save the trained model")
    add("--load_model", type=str, default=None,
        help="Path to load a trained model")

    # Parameter tuning
    add("--tune", action="store_true", help="Perform parameter tuning")

    # "More like this" functionality
    add("--more_like_this", action="store_true",
        help="Find items similar to the specified item that the user might like")

    return parser.parse_args()


def visualize_item_similarities(model, output_dir, logger, n_items=50):
    """Save a heatmap of the similarity matrix for the first *n_items* items.

    Args:
        model: Trained model exposing ``is_trained`` and a 2-D
            ``item_similarity`` array.
        output_dir: Base output directory; the PNG is written under its
            ``images`` subdirectory (created by the caller).
        logger: Logger used for progress and warning messages.
        n_items: Number of leading items to include in the heatmap.
    """
    if not model.is_trained:
        logger.warning("Model not trained. Cannot visualize similarities.")
        return

    logger.info(f"Visualizing item similarities for first {n_items} items...")

    # Top-left n_items x n_items corner of the full similarity matrix
    sim_subset = model.item_similarity[:n_items, :n_items]

    plt.figure(figsize=(10, 8))
    plt.imshow(sim_subset, cmap="viridis")
    plt.colorbar(label="Similarity")
    plt.title(f"Item Similarity Matrix (First {n_items} Items)")
    plt.xlabel("Item ID")
    plt.ylabel("Item ID")
    plt.tight_layout()

    # Join path components separately so the path is portable across OSes
    output_path = os.path.join(output_dir, "images", "item_similarities.png")
    plt.savefig(output_path)
    # Close the figure: without this, repeated calls leak matplotlib figures
    plt.close()
    logger.info(f"Item similarity visualization saved to {output_path}")


def visualize_recommendations(recommendations, output_dir, logger):
    """Save a horizontal bar chart of recommended movies vs. predicted rating.

    Args:
        recommendations: DataFrame with ``movie_title`` and
            ``predicted_rating`` columns; nothing is drawn if empty.
        output_dir: Base output directory; the PNG is written under its
            ``images`` subdirectory (created by the caller).
        logger: Logger used for progress and warning messages.
    """
    if recommendations.empty:
        logger.warning("No recommendations to visualize.")
        return

    logger.info("Visualizing recommendations...")

    plt.figure(figsize=(12, 6))

    # Ascending sort so the highest-rated movie ends up at the top of the chart
    sorted_recs = recommendations.sort_values("predicted_rating", ascending=True)

    # Plot horizontal bar chart
    plt.barh(
        sorted_recs["movie_title"], sorted_recs["predicted_rating"], color="skyblue"
    )
    plt.xlabel("Predicted Rating")
    plt.title("Movie Recommendations")
    plt.xlim(0, 5.5)  # MovieLens ratings are on a 1-5 scale; leave headroom
    plt.grid(axis="x", linestyle="--", alpha=0.7)
    plt.tight_layout()

    # Join path components separately so the path is portable across OSes
    output_path = os.path.join(output_dir, "images", "recommendations.png")
    plt.savefig(output_path)
    # Close the figure: without this, repeated calls leak matplotlib figures
    plt.close()
    logger.info(f"Recommendations visualization saved to {output_path}")


def visualize_similar_items(similar_items, output_dir, logger):
    """Save a horizontal bar chart of similar movies vs. similarity score.

    Args:
        similar_items: DataFrame with ``movie_title`` and ``similarity``
            columns; nothing is drawn if empty.
        output_dir: Base output directory; the PNG is written under its
            ``images`` subdirectory (created by the caller).
        logger: Logger used for progress and warning messages.
    """
    if similar_items.empty:
        logger.warning("No similar items to visualize.")
        return

    logger.info("Visualizing similar items...")

    plt.figure(figsize=(12, 6))

    # Ascending sort so the most similar movie ends up at the top of the chart
    sorted_items = similar_items.sort_values("similarity", ascending=True)

    # Plot horizontal bar chart
    plt.barh(
        sorted_items["movie_title"], sorted_items["similarity"], color="lightgreen"
    )
    plt.xlabel("Similarity Score")
    plt.title("Similar Movies")
    plt.xlim(0, 1.05)  # similarity scores are expected in [0, 1]
    plt.grid(axis="x", linestyle="--", alpha=0.7)
    plt.tight_layout()

    # Join path components separately so the path is portable across OSes
    output_path = os.path.join(output_dir, "images", "similar_items.png")
    plt.savefig(output_path)
    # Close the figure: without this, repeated calls leak matplotlib figures
    plt.close()
    logger.info(f"Similar items visualization saved to {output_path}")


def tune_parameters(data_loader, output_dir, logger, param_grid=None):
    """Perform parameter tuning for the item-based CF model.

    Trains and evaluates one model per (similarity_method, k) combination on a
    single 80/20 train/test split, logs metrics for each run, saves the full
    results table to CSV, and (when any run succeeded) saves RMSE/F1@10 plots.

    Args:
        data_loader: Project DataLoader; must expose ``load_data()``,
            ``split_data()``, ``ratings_df``, ``movies_df`` and writable
            ``n_users`` / ``n_items`` attributes.
        output_dir: Base output directory with pre-created ``csv`` and
            ``images`` subdirectories.
        logger: Configured ``logging.Logger``.
        param_grid: Optional dict with keys ``"similarity_method"`` and
            ``"k"``; when None a default 3x5 grid is used.

    Returns:
        pandas.DataFrame: one row per combination; metric columns are NaN for
        combinations that raised during training or evaluation.
    """
    logger.info("Starting parameter tuning...")

    if param_grid is None:
        param_grid = {
            "similarity_method": ["cosine", "pearson", "adjusted_cosine"],
            "k": [10, 20, 30, 40, 50],
        }

    logger.info(f"Parameter grid: {param_grid}")
    results = []

    # Load data
    data_loader.load_data()
    logger.info("Data loaded for parameter tuning")

    # Record dataset dimensions from the maximum observed IDs.
    # NOTE(review): no "+1" is applied here despite an earlier comment
    # suggesting it — confirm whether DataLoader expects 1-based sizes.
    max_user_id = data_loader.ratings_df["user_id"].max()
    max_item_id = data_loader.ratings_df["item_id"].max()
    data_loader.n_users = max_user_id
    data_loader.n_items = max_item_id
    logger.info(f"Dataset dimensions: {max_user_id} users, {max_item_id} items")

    # Split data once so every parameter combination is scored on the same split
    train_data, test_data, train_matrix, test_matrix = data_loader.split_data(
        test_size=0.2
    )

    logger.info(
        f"Split data into {len(train_data)} training and {len(test_data)} testing samples."
    )

    # Check for any item_id in test_data that exceeds max_item_id
    test_max_item_id = test_data["item_id"].max()
    if test_max_item_id > max_item_id:
        logger.warning(
            f"Test data contains item_id {test_max_item_id} which exceeds max_item_id {max_item_id}"
        )
        max_item_id = test_max_item_id

    # Test every combination in the grid, tracking progress for the log
    total_combinations = len(param_grid["similarity_method"]) * len(param_grid["k"])
    current_combo = 0

    for sim_method in param_grid["similarity_method"]:
        for k in param_grid["k"]:
            current_combo += 1
            logger.info(
                f"Testing combination {current_combo}/{total_combinations}: {sim_method} similarity with k={k}..."
            )

            try:
                # Train model on the shared training split.
                # test_size=0: presumably disables the model's internal split
                # since we evaluate on our own held-out data — TODO confirm.
                start_time = time.time()
                model = ItemBasedCF(similarity_method=sim_method, k=k)
                model.fit(train_data, data_loader.movies_df, test_size=0)
                train_time = time.time() - start_time
                logger.info(f"Model training completed in {train_time:.2f} seconds")

                # Point the model's evaluation at our shared held-out split
                model.test_data = test_data

                # Ensure matrix dimensions can accommodate all item IDs in test data
                if model.n_items < test_max_item_id:
                    logger.warning(
                        f"Adjusting model dimensions to accommodate max item_id {test_max_item_id}"
                    )
                    model.n_items = test_max_item_id

                    # Resize predicted_ratings matrix if needed: zero-pad new
                    # columns so evaluation can index every test item ID
                    if model.predicted_ratings.shape[1] < test_max_item_id:
                        old_shape = model.predicted_ratings.shape
                        new_predicted_ratings = np.zeros((model.n_users, model.n_items))
                        new_predicted_ratings[:, : old_shape[1]] = (
                            model.predicted_ratings
                        )
                        model.predicted_ratings = new_predicted_ratings
                        logger.info(
                            f"Resized predicted_ratings from {old_shape} to {model.predicted_ratings.shape}"
                        )

                # Evaluate
                start_time = time.time()
                metrics = model.evaluate()
                eval_time = time.time() - start_time
                logger.info(f"Evaluation completed in {eval_time:.2f} seconds")

                # Store results
                results.append(
                    {
                        "similarity_method": sim_method,
                        "k": k,
                        "rmse": metrics["rmse"],
                        "mae": metrics["mae"],
                        "precision@10": metrics["precision@10"],
                        "recall@10": metrics["recall@10"],
                        "f1@10": metrics["f1@10"],
                        "training_time": train_time,
                        "evaluation_time": eval_time,
                    }
                )

                logger.info(
                    f"RMSE: {metrics['rmse']:.4f}, F1@10: {metrics['f1@10']:.4f}"
                )
            except Exception as e:
                # Record the failure but keep the row (NaN metrics) so the
                # results table always has one row per combination
                error_msg = f"Error evaluating with {sim_method}, k={k}: {str(e)}"
                logger.error(error_msg)
                logger.debug(traceback.format_exc())

                results.append(
                    {
                        "similarity_method": sim_method,
                        "k": k,
                        "rmse": float("nan"),
                        "mae": float("nan"),
                        "precision@10": float("nan"),
                        "recall@10": float("nan"),
                        "f1@10": float("nan"),
                        "training_time": float("nan"),
                        "evaluation_time": float("nan"),
                    }
                )

    # Convert to DataFrame
    results_df = pd.DataFrame(results)
    logger.info("Parameter tuning completed")

    # Save tuning results to CSV
    results_csv_path = os.path.join(output_dir, "csv/parameter_tuning_results.csv")
    results_df.to_csv(results_csv_path, index=False)
    logger.info(f"Parameter tuning results saved to {results_csv_path}")

    # Find best parameters (skip when every run failed and metrics are all NaN)
    if not results_df["rmse"].isna().all():
        best_rmse_idx = results_df["rmse"].idxmin()
        best_rmse_params = {
            "similarity_method": results_df.loc[best_rmse_idx, "similarity_method"],
            "k": results_df.loc[best_rmse_idx, "k"],
            "rmse": results_df.loc[best_rmse_idx, "rmse"],
        }
        logger.info(f"Best parameters by RMSE: {best_rmse_params}")

    if not results_df["f1@10"].isna().all():
        best_f1_idx = results_df["f1@10"].idxmax()
        best_f1_params = {
            "similarity_method": results_df.loc[best_f1_idx, "similarity_method"],
            "k": results_df.loc[best_f1_idx, "k"],
            "f1@10": results_df.loc[best_f1_idx, "f1@10"],
        }
        logger.info(f"Best parameters by F1@10: {best_f1_params}")

    # Visualize results if there are valid results
    if not results_df["rmse"].isna().all() and not results_df["f1@10"].isna().all():
        logger.info("Creating parameter tuning visualizations...")
        plt.figure(figsize=(15, 6))

        # Left panel: RMSE vs. k, one line per similarity method
        plt.subplot(1, 2, 1)
        for sim in param_grid["similarity_method"]:
            df_sim = results_df[results_df["similarity_method"] == sim]
            if not df_sim["rmse"].isna().all():
                plt.plot(df_sim["k"], df_sim["rmse"], marker="o", label=sim)

        plt.title("RMSE by Similarity Method and k")
        plt.xlabel("k (number of neighbors)")
        plt.ylabel("RMSE")
        plt.grid(True)
        plt.legend()

        # Right panel: F1@10 vs. k, one line per similarity method
        plt.subplot(1, 2, 2)
        for sim in param_grid["similarity_method"]:
            df_sim = results_df[results_df["similarity_method"] == sim]
            if not df_sim["f1@10"].isna().all():
                plt.plot(df_sim["k"], df_sim["f1@10"], marker="o", label=sim)

        plt.title("F1@10 by Similarity Method and k")
        plt.xlabel("k (number of neighbors)")
        plt.ylabel("F1@10")
        plt.grid(True)
        plt.legend()

        plt.tight_layout()

        vis_path = os.path.join(output_dir, "images/parameter_tuning.png")
        plt.savefig(vis_path)
        logger.info(f"Parameter tuning visualization saved to {vis_path}")

    return results_df


def main():
    """Run the recommendation system end-to-end based on CLI arguments.

    Parses arguments, configures logging, loads the dataset, then performs
    whichever of tuning / training / loading / saving / evaluation /
    similar-item lookup / recommendation / visualization the flags request.

    Returns:
        int: 0 on success, 1 on any unhandled error (logged with traceback).
    """
    # Local import: the original module imported `logging` only inside the
    # `__main__` guard, so calling main() from an importing module raised
    # NameError. Importing here makes main() self-contained.
    import logging

    # Parse command line arguments
    args = parse_args()

    # Setup logging
    log_level = getattr(logging, args.log_level)
    logger = setup_logger(log_dir=args.log_dir, log_level=log_level)
    logger.info("=" * 80)
    logger.info("Starting item-based collaborative filtering recommendation system")
    logger.info(f"Command line arguments: {args}")

    # Create output directories up front so later savefig/to_csv calls succeed
    images_dir = os.path.join(args.output_dir, "images")
    csv_dir = os.path.join(args.output_dir, "csv")
    os.makedirs(images_dir, exist_ok=True)
    os.makedirs(csv_dir, exist_ok=True)
    logger.info(f"Output directories created: {images_dir}")

    # Log system information to make runs reproducible/debuggable
    import platform
    import sys

    logger.info(f"Python version: {sys.version}")
    logger.info(f"Platform: {platform.platform()}")

    # Create data loader
    data_loader = DataLoader(data_path=args.data_path)
    logger.info(f"Data loader initialized with path: {args.data_path}")

    try:
        # Load data
        start_time = time.time()
        ratings_df, movies_df = data_loader.load_data()
        load_time = time.time() - start_time

        logger.info(
            f"Loaded MovieLens dataset in {load_time:.2f} seconds with {len(ratings_df)} ratings "
            f"from {ratings_df['user_id'].nunique()} users on {ratings_df['item_id'].nunique()} movies."
        )

        # Log dataset stats
        stats = data_loader.get_dataset_stats()
        logger.info(f"Dataset statistics: {stats}")

        # Parameter tuning is a standalone mode: run it and exit
        if args.tune:
            logger.info("Running parameter tuning...")
            tune_parameters(data_loader, output_dir=args.output_dir, logger=logger)
            return 0

        # Create or load model
        if args.load_model:
            logger.info(f"Loading model from {args.load_model}...")
            start_time = time.time()
            model = ItemBasedCF.load_model(args.load_model, movies_df=movies_df)
            load_time = time.time() - start_time
            logger.info(f"Model loaded in {load_time:.2f} seconds")
        else:
            logger.info(
                f"Training new model with similarity={args.similarity}, prediction={args.prediction}, k={args.k}..."
            )
            start_time = time.time()
            model = ItemBasedCF(
                similarity_method=args.similarity,
                prediction_method=args.prediction,
                k=args.k,
            )
            model.fit(ratings_df, movies_df, test_size=args.test_size)
            train_time = time.time() - start_time
            logger.info(f"Model training completed in {train_time:.2f} seconds")

        # Save model if requested
        if args.save_model:
            logger.info(f"Saving model to {args.save_model}...")
            model.save_model(args.save_model)
            logger.info("Model saved successfully")

        # Evaluate model if requested
        if args.evaluate:
            logger.info("Evaluating model...")
            start_time = time.time()
            metrics = model.evaluate()
            eval_time = time.time() - start_time

            # Save metrics to CSV (portable path join instead of "csv/...")
            metrics_df = pd.DataFrame([metrics])
            metrics_path = os.path.join(args.output_dir, "csv", "evaluation_metrics.csv")
            metrics_df.to_csv(metrics_path, index=False)
            logger.info(f"Evaluation metrics saved to {metrics_path}")

        # Visualize similarities if requested
        if args.visualize:
            logger.info("Visualizing item similarities...")
            visualize_item_similarities(
                model, output_dir=args.output_dir, logger=logger
            )

        # Get similar items if item_id is provided.
        # Compare against None: a plain truthiness test would silently skip
        # a legitimate item ID of 0.
        if args.item_id is not None:
            logger.info(f"Finding items similar to item {args.item_id}...")
            start_time = time.time()
            similar_items = model.get_similar_items(
                args.item_id, top_n=args.num_recommendations
            )
            sim_time = time.time() - start_time
            logger.info(
                f"Found {len(similar_items)} similar items in {sim_time:.2f} seconds"
            )

            # Log top similar items
            if not similar_items.empty:
                top_sim = (
                    similar_items[["movie_title", "similarity"]]
                    .head(5)
                    .to_dict(orient="records")
                )
                logger.info(f"Top 5 similar items: {top_sim}")

                # Save similar items to CSV
                sim_path = os.path.join(
                    args.output_dir, "csv", f"similar_items_{args.item_id}.csv"
                )
                similar_items.to_csv(sim_path, index=False)
                logger.info(f"Similar items saved to {sim_path}")

                # Visualize similar items if requested
                if args.visualize:
                    visualize_similar_items(
                        similar_items, output_dir=args.output_dir, logger=logger
                    )

        # Generate recommendations if user_id is provided (None check, as above)
        if args.user_id is not None:
            # If more_like_this is requested and both user_id and item_id are provided
            if args.more_like_this and args.item_id is not None:
                logger.info(
                    f"Finding items similar to {args.item_id} that user {args.user_id} might like..."
                )
                start_time = time.time()

                recommendations = model.more_like_this(
                    args.user_id, args.item_id, top_n=args.num_recommendations
                )

                rec_time = time.time() - start_time
                logger.info(
                    f"Generated {len(recommendations)} 'more like this' recommendations in {rec_time:.2f} seconds"
                )

                # Save recommendations to CSV
                if not recommendations.empty:
                    rec_path = os.path.join(
                        args.output_dir,
                        "csv",
                        f"more_like_{args.item_id}_for_user_{args.user_id}.csv",
                    )
                    recommendations.to_csv(rec_path, index=False)
                    logger.info(f"'More like this' recommendations saved to {rec_path}")

            else:
                # Regular recommendations
                logger.info(f"Generating recommendations for user {args.user_id}...")
                start_time = time.time()
                recommendations = model.recommend(
                    args.user_id, top_n=args.num_recommendations
                )
                rec_time = time.time() - start_time
                logger.info(
                    f"Generated {len(recommendations)} recommendations in {rec_time:.2f} seconds"
                )

                # Log top recommendations
                if not recommendations.empty:
                    top_recs = (
                        recommendations[["movie_title", "predicted_rating"]]
                        .head(5)
                        .to_dict(orient="records")
                    )
                    logger.info(f"Top 5 recommendations: {top_recs}")

                    # Save recommendations to CSV
                    rec_path = os.path.join(
                        args.output_dir, "csv", f"recommendations_user_{args.user_id}.csv"
                    )
                    recommendations.to_csv(rec_path, index=False)
                    logger.info(f"Recommendations saved to {rec_path}")
                else:
                    logger.warning("No recommendations generated")

                # Visualize recommendations if requested
                if args.visualize and not recommendations.empty:
                    visualize_recommendations(
                        recommendations, output_dir=args.output_dir, logger=logger
                    )

        logger.info("Recommendation system execution completed successfully")
        return 0

    except Exception as e:
        # Top-level boundary: log the error with full traceback and signal
        # failure via the process exit code instead of crashing
        logger.error(f"Error: {str(e)}")
        logger.error(traceback.format_exc())
        return 1


if __name__ == "__main__":
    # `logging` is imported at module top level; importing it only inside this
    # guard (as before) made main() fail with NameError when the module was
    # imported rather than run as a script.
    # SystemExit is raised directly instead of calling the site-module `exit()`
    # helper, which is not guaranteed to exist in all environments.
    raise SystemExit(main())
