import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import shap
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split, GridSearchCV


def ensure_picture_folder_exists():
    """Create the ./picture output folder if needed and return its path.

    Returns:
        str: the relative path of the folder where plots are saved.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    check-then-create pair, which was racy (the folder could appear
    between the ``exists`` test and ``makedirs``).
    """
    picture_folder = "./picture"
    os.makedirs(picture_folder, exist_ok=True)
    return picture_folder


def load_data():
    """Read the Olympic medal-count and event-program CSV files.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: (medal_data, program_data).

    Both files are decoded as ISO-8859-1 (they contain non-UTF-8 bytes).
    """
    read_opts = {'encoding': 'ISO-8859-1'}
    medal_data = pd.read_csv('summerOly_medal_counts.csv', **read_opts)
    program_data = pd.read_csv('summerOly_programs.csv', **read_opts)
    return medal_data, program_data


def preprocess_data(medal_data, program_data):
    """Zero-fill missing values and encode categorical columns in place.

    Args:
        medal_data: medal-count frame; its 'NOC' column is replaced by
            integer category codes and a synthetic 'Host' column is added.
        program_data: program frame; only NaN-filled.

    Returns:
        tuple: the (mutated) medal_data and program_data frames.

    NOTE(review): 'Host' is drawn at random (10% hosts, seed 42) rather
    than from real host-country data — looks like a placeholder; confirm.
    """
    for frame in (medal_data, program_data):
        frame.fillna(0, inplace=True)
    medal_data['NOC'] = medal_data['NOC'].astype('category').cat.codes
    np.random.seed(42)
    medal_data['Host'] = np.random.choice([0, 1], size=len(medal_data), p=[0.9, 0.1])
    return medal_data, program_data


def feature_engineering(medal_data):
    """Add per-NOC rolling stats, cumulative sums, and an interaction term.

    For each of 'Gold' and 'Total', computes 3-period rolling mean/std/max
    (min_periods=1) within each NOC group, plus group-wise cumulative sums
    and a Year*NOC interaction column. Mutates and returns medal_data.
    """
    # Rolling aggregates: outer loop over the statistic keeps the column
    # creation order identical to the original hand-written version.
    for agg_name, suffix in (('mean', 'avg'), ('std', 'std'), ('max', 'max')):
        for medal_col in ('Gold', 'Total'):
            medal_data[f'{medal_col}_3yr_{suffix}'] = (
                medal_data.groupby('NOC')[medal_col].transform(
                    lambda s, a=agg_name: getattr(
                        s.rolling(window=3, min_periods=1), a)()))
    for medal_col in ('Gold', 'Total'):
        medal_data[f'{medal_col}_cumsum'] = (
            medal_data.groupby('NOC')[medal_col].cumsum())
    medal_data['Year_NOC_interaction'] = medal_data['Year'] * medal_data['NOC']
    return medal_data


def check_and_fill_nan(medal_data):
    """Report per-column NaN counts, then zero-fill remaining gaps in place.

    Returns the (mutated) frame so the call site can chain assignments.
    """
    nan_counts = medal_data.isnull().sum()
    print("Checking for NaN values in the data:")
    print(nan_counts)
    medal_data.fillna(0, inplace=True)
    return medal_data


def extract_features_and_target(medal_data):
    """Select the model feature columns and the two target series.

    Returns:
        tuple: (X, y_gold, y_total) where X holds the 12 engineered
        feature columns and the targets are the raw medal counts.
    """
    # Assemble the feature list in the same order as the original
    # hand-written version: base cols, rolling stats, cumsums, interaction.
    features = ['Year', 'NOC', 'Host']
    for suffix in ('3yr_avg', '3yr_std', '3yr_max', 'cumsum'):
        features += [f'Gold_{suffix}', f'Total_{suffix}']
    features.append('Year_NOC_interaction')
    X = medal_data[features]
    return X, medal_data['Gold'], medal_data['Total']


def split_data(X, y_gold, y_total):
    """Split the features and both targets into train/test sets (80/20).

    Args:
        X: feature frame.
        y_gold: gold-medal target series.
        y_total: total-medal target series.

    Returns:
        tuple: (X_train, X_test, y_train_gold, y_test_gold,
        y_train_total, y_test_total).

    Fix: the original called train_test_split twice (once per target) and
    only stayed aligned because both calls used the same random_state. A
    single call splitting all three arrays together shares one shuffled
    index, so feature/target alignment is guaranteed by construction.
    """
    (X_train, X_test,
     y_train_gold, y_test_gold,
     y_train_total, y_test_total) = train_test_split(
        X, y_gold, y_total, test_size=0.2, random_state=42, shuffle=True)
    return X_train, X_test, y_train_gold, y_test_gold, y_train_total, y_test_total


def hyperparameter_tuning(X_train, y_train_gold, y_train_total):
    """Tune one GradientBoostingRegressor per target with GridSearchCV.

    Args:
        X_train: training features.
        y_train_gold: gold-medal training target.
        y_train_total: total-medal training target.

    Returns:
        tuple: the two best estimators (gold model, total model), refit on
        the full training data by GridSearchCV.
    """
    param_grid = {
        'n_estimators': [50, 100, 200],
        'learning_rate': [0.01, 0.05, 0.1],
        'max_depth': [3, 5, 7],
        'min_samples_split': [2, 5, 10]
    }

    def build_search():
        # Both targets use an identical search configuration; each call
        # returns a fresh estimator so the two fits are independent.
        return GridSearchCV(
            estimator=GradientBoostingRegressor(random_state=42),
            param_grid=param_grid,
            scoring='neg_mean_squared_error',
            cv=3,
            n_jobs=-1,
            verbose=1
        )

    grid_search_gold = build_search()
    grid_search_total = build_search()

    print("Starting grid search for gold medal prediction...")
    grid_search_gold.fit(X_train, y_train_gold)

    print("Starting grid search for total medal prediction...")
    grid_search_total.fit(X_train, y_train_total)

    print(f"Best parameters for gold medal prediction: {grid_search_gold.best_params_}")
    print(f"Best parameters for total medal prediction: {grid_search_total.best_params_}")

    return grid_search_gold.best_estimator_, grid_search_total.best_estimator_


def evaluate_model(model_gold_gbt_optimized, model_total_gbt_optimized, X_test, y_test_gold, y_test_total):
    """Print test-set MSE and MAE for both optimized models.

    Args:
        model_gold_gbt_optimized: fitted gold-medal regressor.
        model_total_gbt_optimized: fitted total-medal regressor.
        X_test: held-out features.
        y_test_gold: held-out gold-medal target.
        y_test_total: held-out total-medal target.
    """
    def _scores(model, y_true):
        # One prediction pass per model, scored with both metrics.
        predictions = model.predict(X_test)
        return (mean_squared_error(y_true, predictions),
                mean_absolute_error(y_true, predictions))

    mse_gold, mae_gold = _scores(model_gold_gbt_optimized, y_test_gold)
    mse_total, mae_total = _scores(model_total_gbt_optimized, y_test_total)

    print(
        f"Optimized Gradient Boosting Trees Model - Gold Medal Prediction MSE: {mse_gold:.2f}, MAE: {mae_gold:.2f}")
    print(
        f"Optimized Gradient Boosting Trees Model - Total Medal Prediction MSE: {mse_total:.2f}, MAE: {mae_total:.2f}")


def visualize_feature_importance(model, features, picture_folder, filename, title):
    """Plot the model's feature importances as a horizontal bar chart.

    Args:
        model: fitted estimator exposing ``feature_importances_``.
        features: feature names, one per importance value.
        picture_folder: directory the image is saved into.
        filename: image file name (e.g. "3.1.png").
        title: chart title.

    Fix: the ``filename`` argument was previously ignored and every call
    saved to the same hard-coded path, overwriting earlier plots.
    """
    plt.figure(figsize=(10, 6))
    plt.barh(features, model.feature_importances_, color='skyblue')
    plt.title(title)
    plt.xlabel('Importance')
    plt.ylabel('Feature')
    plt.grid(axis='x')  # Add vertical grid lines
    plt.tight_layout()
    plt.savefig(os.path.join(picture_folder, filename))
    plt.show()


def visualize_residuals(y_test, y_pred, picture_folder, filename, title):
    """Plot residuals (actual - predicted) against predicted values.

    Args:
        y_test: actual target values.
        y_pred: model predictions aligned with ``y_test``.
        picture_folder: directory the image is saved into.
        filename: image file name (e.g. "3.2.png").
        title: chart title.

    Fix: the ``filename`` argument was previously ignored and every call
    saved to the same hard-coded path, overwriting earlier plots.
    """
    residuals = y_test - y_pred
    plt.figure(figsize=(12, 6))
    plt.scatter(y_pred, residuals, color='green', label='Gradient Boosting Trees')
    plt.axhline(y=0, color='red', linestyle='--')  # Zero-error reference line
    plt.title(title)
    plt.xlabel('Predicted Values')
    plt.ylabel('Residuals')
    plt.legend()
    plt.grid(True)  # Add both horizontal and vertical grid lines
    plt.tight_layout()
    plt.savefig(os.path.join(picture_folder, filename))
    plt.show()


def shap_analysis(model, X_test, features, picture_folder, filename, title):
    """Compute SHAP values for a tree model and save a summary plot.

    Args:
        model: fitted tree-based estimator (supported by shap.TreeExplainer).
        X_test: feature frame to explain.
        features: feature names for the plot axis.
        picture_folder: directory the image is saved into.
        filename: image file name (e.g. "3.3.png").
        title: chart title.

    Fix: the ``filename`` argument was previously ignored and every call
    saved to the same hard-coded path, so the gold and total summary plots
    overwrote each other.
    """
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X_test)

    plt.figure(figsize=(10, 6))
    shap.summary_plot(shap_values, X_test, feature_names=features, show=False)
    plt.title(title)
    plt.grid(True)  # Add both horizontal and vertical grid lines
    plt.tight_layout()
    plt.savefig(os.path.join(picture_folder, filename))
    plt.show()


# Main execution: end-to-end pipeline from raw CSVs to evaluated models
# and saved diagnostic plots. Each step mutates/produces data consumed by
# the next, so the order below matters.
if __name__ == "__main__":
    # Ensure the picture folder exists
    picture_folder = ensure_picture_folder_exists()

    # Step 1: Load data
    medal_data, program_data = load_data()

    # Step 2: Data Preprocessing (NaN fill, NOC encoding, synthetic Host flag)
    medal_data, program_data = preprocess_data(medal_data, program_data)

    # Step 3: Feature Engineering (rolling stats, cumsums, interaction term)
    medal_data = feature_engineering(medal_data)

    # Step 4: Check and handle NaN values introduced by rolling windows
    medal_data = check_and_fill_nan(medal_data)

    # Step 5: Extract features and target variables
    X, y_gold, y_total = extract_features_and_target(medal_data)

    # Step 6: Split the data into training and testing sets
    X_train, X_test, y_train_gold, y_test_gold, y_train_total, y_test_total = split_data(X, y_gold, y_total)

    # Step 7: Hyperparameter tuning (grid search, one model per target)
    model_gold_gbt_optimized, model_total_gbt_optimized = hyperparameter_tuning(X_train, y_train_gold, y_train_total)

    # Step 8: Model evaluation (test-set MSE/MAE for both models)
    evaluate_model(model_gold_gbt_optimized, model_total_gbt_optimized, X_test, y_test_gold, y_test_total)

    # Step 9: Visualization — each call saves a figure into picture_folder
    visualize_feature_importance(model_gold_gbt_optimized, X.columns, picture_folder, "3.1.png",
                                 'Feature Importance for Gold Medal Prediction (Optimized Gradient Boosting Trees)')

    visualize_residuals(y_test_gold, model_gold_gbt_optimized.predict(X_test), picture_folder, "3.2.png",
                        'Residual Plot for Gold Medal Prediction (Optimized Gradient Boosting Trees)')

    shap_analysis(model_gold_gbt_optimized, X_test, X.columns, picture_folder, "3.3.png",
                  'SHAP Summary Plot for Gold Medal Prediction (Optimized Gradient Boosting Trees)')

    shap_analysis(model_total_gbt_optimized, X_test, X.columns, picture_folder, "3.4.png",
                  'SHAP Summary Plot for Total Medal Prediction (Optimized Gradient Boosting Trees)')