#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from GenreFeatureData import GenreFeatureData
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import numpy as np
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report
import matplotlib.pyplot as plt
from scipy import interp
from itertools import cycle
from sklearn.preprocessing import label_binarize
import scienceplots
def plot_roc_curve(model_name, y_true, y_score, n_classes):
    """Plot one-vs-rest ROC curves (per class + micro-average) and save to ./figs.

    Args:
        model_name: Name used for the output file ``./figs/<model_name>.jpg``.
        y_true: 1-D array of integer class labels in ``range(n_classes)``.
        y_score: 2-D array of shape (n_samples, n_classes) with per-class
            probability (or softmax-normalized decision) scores.
        n_classes: Total number of classes.
    """
    # Binarize the integer labels into one-vs-rest indicator columns
    y_test_bin = label_binarize(y_true, classes=range(n_classes))

    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()

    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # Compute micro-average ROC curve and ROC area over all classes pooled
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

    # 'ieee' style is provided by the scienceplots package imported above
    with plt.style.context('ieee'):
        # Plot all ROC curves
        fig = plt.figure(figsize=(5, 4))
        plt.plot(fpr["micro"], tpr["micro"],
                label='micro-average ROC curve (area = {0:0.2f})'
                    ''.format(roc_auc["micro"]),
                color='deeppink', linestyle=':', linewidth=4)

        # Plot ROC curve for each class
        for i in range(n_classes):
            plt.plot(fpr[i], tpr[i], lw=2,
                    label='ROC curve of class {0} (area = {1:0.2f})'
                        ''.format(i, roc_auc[i]))

        # Chance-level diagonal
        plt.plot([0, 1], [0, 1], 'k--', lw=2)
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('ROC to multi-class')
        plt.legend(loc="lower right")
        os.makedirs("./figs", exist_ok=True)
        plt.savefig(f'./figs/{model_name}.jpg')
        # FIX: close the figure so repeated calls (one per model in main)
        # do not accumulate open matplotlib figures.
        plt.close(fig)

def main():
    """Load genre features, grid-search several classifiers, and report
    accuracy, confusion matrix, classification report and ROC curves
    for each model on a held-out test split."""
    genre_features = GenreFeatureData()

    # Load and preprocess data: preprocess raw audio only if the cached
    # serialized feature files are not all present on disk.
    if not all([
        os.path.isfile(genre_features.train_X_preprocessed_data),
        os.path.isfile(genre_features.train_Y_preprocessed_data),
        os.path.isfile(genre_features.dev_X_preprocessed_data),
        os.path.isfile(genre_features.dev_Y_preprocessed_data),
        os.path.isfile(genre_features.test_X_preprocessed_data),
        os.path.isfile(genre_features.test_Y_preprocessed_data)
    ]):
        print("Preprocessing raw audio files")
        genre_features.load_preprocess_data()
    else:
        print("Deserializing preprocessed files")
        genre_features.load_deserialize_data()

    # Convert one-hot encoded labels to integer class indices.
    train_X = genre_features.train_X
    train_Y = genre_features.train_Y.argmax(axis=1)
    print(np.unique(train_Y))

    dev_X = genre_features.dev_X
    dev_Y = genre_features.dev_Y.argmax(axis=1)
    print(np.unique(dev_Y))

    test_X = genre_features.test_X
    test_Y = genre_features.test_Y.argmax(axis=1)
    # FIX: original printed the whole test_Y array here; print the unique
    # class values instead, consistent with the train/dev prints above.
    print(np.unique(test_Y))

    # Flatten each sample's 2-D feature matrix into a single feature vector.
    train_X = train_X.reshape(train_X.shape[0], -1)
    dev_X = dev_X.reshape(dev_X.shape[0], -1)
    test_X = test_X.reshape(test_X.shape[0], -1)

    # NOTE(review): the scaler is fitted on the original train split, but the
    # data is pooled and re-split below, so scaled statistics leak across the
    # new split boundaries — acceptable for exploratory work, confirm intent.
    scaler = StandardScaler()
    train_X = scaler.fit_transform(train_X)
    dev_X = scaler.transform(dev_X)
    test_X = scaler.transform(test_X)

    # FIX: the original merged test into train AND then concatenated
    # (train+test) with test again, duplicating every test sample in the
    # pooled dataset and leaking duplicates into the new test split.
    # Pool each split exactly once before re-splitting.
    X = np.concatenate((train_X, test_X, dev_X), axis=0)
    y = np.concatenate((train_Y, test_Y, dev_Y), axis=0)
    train_X, test_X, train_Y, test_Y = train_test_split(
        X, y, test_size=0.2, random_state=42)
    print(train_Y.shape)
    print(dev_Y.shape)
    print(test_Y.shape)

    # FIX: derive the class count from the labels instead of hard-coding 6.
    n_classes = len(np.unique(y))

    models = {
        'RandomForestClassifier': RandomForestClassifier(),
        # 'GradientBoostingClassifier': GradientBoostingClassifier(),
        'DecisionTreeClassifier': DecisionTreeClassifier(),
        'SVC': SVC(probability=True),
        'kNN': KNeighborsClassifier(),
        'XGBClassifier': XGBClassifier(eval_metric='mlogloss'),
        'MLPClassifier': MLPClassifier()
    }

    # Define parameter grids for each model
    param_grids = {
        'RandomForestClassifier': {'n_estimators': [50, 100, 200], 'max_depth': [5, 10]},
        'GradientBoostingClassifier': {'n_estimators': [50, 100, 200], 'learning_rate': [0.01, 0.1]},
        'DecisionTreeClassifier': {'max_depth': [5, 10]},
        'SVC': {'C': [13], 'kernel': ['rbf']},
        'kNN': {'n_neighbors': range(1, 11)},
        'XGBClassifier': {'n_estimators': [100, 200], 'learning_rate': [0.01, 0.1]},
        'MLPClassifier': {'hidden_layer_sizes': [(50,), (100,)]}
    }

    best_estimators = {}

    for model_name, model in models.items():
        print(f"\nGrid searching {model_name}...")

        grid_search = GridSearchCV(model, param_grids[model_name], cv=5, scoring='accuracy')
        grid_search.fit(train_X, train_Y)

        best_estimators[model_name] = grid_search.best_estimator_
        print(f"Best params for {model_name}: {grid_search.best_params_}")
        print(f"Best cross-validated accuracy of {model_name}: {grid_search.best_score_ * 100:.2f}%")

        # Evaluate the tuned estimator on the held-out test split.
        test_predictions = best_estimators[model_name].predict(test_X)
        test_accuracy = accuracy_score(test_Y, test_predictions)
        print(f"Test Accuracy of {model_name}: {test_accuracy * 100:.2f}%")

        # Confusion Matrix
        cm = confusion_matrix(test_Y, test_predictions)
        print(f"Confusion Matrix of {model_name}:\n{cm}")

        # Classification report
        cls_report = classification_report(test_Y, test_predictions)
        print(f"Classification Report of {model_name}:\n{cls_report}")

        # ROC Curve (for models with probability estimates)
        if hasattr(best_estimators[model_name], "predict_proba"):
            y_score = best_estimators[model_name].predict_proba(test_X)
            plot_roc_curve(model_name, test_Y, y_score, n_classes)

        # For models exposing only decision_function (e.g. some SVC configs),
        # softmax-normalize the decision values into pseudo-probabilities.
        elif hasattr(best_estimators[model_name], "decision_function"):
            y_score = best_estimators[model_name].decision_function(test_X)
            y_score = np.exp(y_score) / np.sum(np.exp(y_score), axis=1, keepdims=True)  # Softmax
            plot_roc_curve(model_name, test_Y, y_score, n_classes)

    # best_estimators is available here for further predictions or analyses.

# Script entry point: run the full grid-search/evaluation pipeline.
if __name__ == "__main__":
    main()