#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import json
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import catboost as cbt
import xgboost as xgb
from statistics import mode
from imblearn.over_sampling import SMOTE
import joblib
import argparse


def train_models(data_path):
    """Train LightGBM, XGBoost and CatBoost classifiers and save all artifacts.

    Loads a CSV with a ``Label`` column, rebalances the training split with
    SMOTE, trains the three boosted-tree models, builds a per-class "model
    leadership" chart (which model has the best F1 for each class index), and
    saves the models, the chart and a fitted StandardScaler under ``models/``.

    Parameters
    ----------
    data_path : str
        Path to the CSV dataset. Must contain a ``Label`` column; all other
        columns are used as features.

    Returns
    -------
    bool
        True if training and saving succeeded, False on load/save failure.
    """
    try:
        df = pd.read_csv(data_path)
    except Exception as e:
        print(f"Unable to load dataset: {e}")
        return False

    print("Class distribution:")
    print(df.Label.value_counts())

    X = df.drop(['Label'], axis=1)
    y = df['Label']
    # NOTE(review): split is not stratified; for imbalanced data consider
    # stratify=y — left unchanged to preserve existing results.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=0.8, test_size=0.2, random_state=0)

    print("Original training set class distribution:")
    print(pd.Series(y_train).value_counts())

    # Oversample minority classes in the training split only (test set
    # stays untouched so metrics reflect the real distribution).
    smote = SMOTE(random_state=42)
    X_train, y_train = smote.fit_resample(X_train, y_train)

    print("Training set class distribution after SMOTE:")
    print(pd.Series(y_train).value_counts())

    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs('models', exist_ok=True)

    # Train LightGBM model
    print("Training LightGBM model...")
    lg = lgb.LGBMClassifier(random_state=42)
    lg.fit(X_train, y_train)
    y_pred_lg = np.asarray(lg.predict(X_test)).ravel()
    lg_f1 = f1_score(y_test, y_pred_lg, average=None)  # per-class F1 vector
    print("LightGBM model training completed")
    print(f"F1 score for LightGBM: {lg_f1}")

    # Train XGBoost model
    print("Training XGBoost model...")
    xg = xgb.XGBClassifier(random_state=42)
    xg.fit(X_train, y_train)
    y_pred_xg = np.asarray(xg.predict(X_test)).ravel()
    xg_f1 = f1_score(y_test, y_pred_xg, average=None)
    print("XGBoost model training completed")
    print(f"F1 score for XGBoost: {xg_f1}")

    # Train CatBoost model
    print("Training CatBoost model...")
    cb = cbt.CatBoostClassifier(random_state=42, verbose=0)
    cb.fit(X_train, y_train)
    # CatBoost's predict returns a column-shaped (n, 1) array; flatten it to
    # 1-D so sklearn metrics treat it as a plain multiclass prediction.
    y_pred_cb = np.asarray(cb.predict(X_test)).ravel()
    cb_f1 = f1_score(y_test, y_pred_cb, average=None)
    print("CatBoost model training completed")
    print(f"F1 score for CatBoost: {cb_f1}")

    # Create model leadership chart: for each class index, record which model
    # achieved the best F1. argmax over the stacked rows keeps the original
    # tie-break order (lightgbm, then xgboost, then catboost).
    print("Creating model leadership chart...")
    model_names = ('lightgbm', 'xgboost', 'catboost')
    scores = np.vstack([lg_f1, xg_f1, cb_f1])
    model_leadership = {
        str(i): model_names[int(np.argmax(scores[:, i]))]
        for i in range(scores.shape[1])
    }

    print("Model leadership chart:")
    print(model_leadership)

    try:
        lg.booster_.save_model('models/lightgbm_model.txt')
        print("LightGBM model saved")

        xg.save_model('models/xgboost_model.json')
        print("XGBoost model saved")

        cb.save_model('models/catboost_model.cbm')
        print("CatBoost model saved")

        with open('models/model_leadership.json', 'w') as f:
            json.dump(model_leadership, f)
        print("Model leadership chart saved")

        from sklearn.preprocessing import StandardScaler
        # NOTE(review): scaler is fit on the FULL dataset (train + test) —
        # leaks test statistics — and the models above were trained on
        # unscaled features. Confirm how inference consumes this scaler
        # before changing; behavior kept as-is for compatibility.
        scaler = StandardScaler()
        scaler.fit(X)
        joblib.dump(scaler, 'models/scaler.pkl')
        print("StandardScaler saved")

        return True
    except Exception as e:
        print(f"Error when saving models: {e}")
        return False


def create_dummy_data(n_samples=5000, n_features=10, out_path='data/test_dataset.csv'):
    """Generate a random classification dataset CSV for smoke-testing.

    Backward compatible with the original zero-argument call: defaults
    reproduce the previous behavior exactly (5000 rows, 10 features,
    written to ``data/test_dataset.csv``).

    Parameters
    ----------
    n_samples : int, optional
        Number of rows to generate.
    n_features : int, optional
        Number of feature columns (named ``feature_0`` .. ``feature_{k-1}``).
    out_path : str, optional
        Destination CSV path; parent directories are created as needed.

    Returns
    -------
    str
        The path the dataset was written to.
    """
    np.random.seed(42)  # fixed seed so repeated runs produce identical data

    data = np.random.rand(n_samples, n_features)
    labels = np.random.randint(0, 5, n_samples)  # 5 classes: labels 0..4

    columns = [f'feature_{i}' for i in range(n_features)]
    df = pd.DataFrame(data, columns=columns)
    df['Label'] = labels

    # Create the parent directory of the target path (default: 'data').
    os.makedirs(os.path.dirname(out_path) or '.', exist_ok=True)
    df.to_csv(out_path, index=False)
    print(
        f"Test dataset saved to {out_path}, with {n_samples} records")
    return out_path


if __name__ == "__main__":
    # CLI entry point: choose a dataset (real CSV via --data, or a generated
    # dummy one via --dummy), train the models, and report the saved files.
    parser = argparse.ArgumentParser(
        description='Train LCCDE models and generate model files')
    parser.add_argument('--data', help='Path to dataset CSV file')
    parser.add_argument('--dummy', action='store_true',
                        help='Create and use test data')

    args = parser.parse_args()

    if args.dummy:
        data_path = create_dummy_data()
    elif args.data:
        data_path = args.data
    else:
        print("Please provide a dataset path or use --dummy to create test data")
        parser.print_help()
        # raise SystemExit rather than the site-provided exit() builtin,
        # which is absent under `python -S` and in frozen executables.
        raise SystemExit(1)

    success = train_models(data_path)

    if success:
        print("\nAll models have been successfully trained and saved!")
        print("Model files are saved in the 'models/' directory:")
        print("- models/lightgbm_model.txt")
        print("- models/xgboost_model.json")
        print("- models/catboost_model.cbm")
        print("- models/model_leadership.json")
        print("- models/scaler.pkl")
    else:
        print("\nModel training or saving failed, please check the error messages.")
