# This is a sample Python script.
import csv

import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

from Gene import Gene
from Model import Model

# Module-level containers shared by the functions below; filled in __main__.
dataset = []
X_train = []
Y_train = []
X_test = []
# One feature name per Gene enum member, in declaration order.
feature_names = [str(name) for name in Gene.__members__]


def generate_dataset(file_name, data_num, label=0):
    """Append *data_num* labeled samples from *file_name* to the module-level ``dataset``.

    The CSV is laid out with one sample per column (columns 1..data_num);
    the row index for each feature comes from the Gene member's value.
    Each appended entry is ``[feature values..., label]``.
    """
    table = pd.read_csv(file_name)
    for col in range(1, data_num + 1):
        sample = [table.iloc[member.value, col] for member in Gene.__members__.values()]
        sample.append(label)
        dataset.append(sample)


def dataset_to_csv():
    """Dump the module-level training split to X_train.csv and Y_train.csv."""
    with open('X_train.csv', 'w', encoding='utf-8', newline='') as file_obj:
        writer = csv.writer(file_obj)
        # header row: one column per feature
        writer.writerow(feature_names)
        # one row per training sample
        writer.writerows(X_train)

    with open('Y_train.csv', 'w', encoding='utf-8', newline='') as file_obj:
        writer = csv.writer(file_obj)
        # single-column file holding the labels
        writer.writerow(['label'])
        for y in Y_train:
            writer.writerow([y])


def train(clf):
    """Fit *clf* on the global train split and evaluate it on the test split.

    LightGBM classifiers are given the feature names so their importance
    plots are labeled; every other estimator is fitted plainly.

    Returns (fpr, tpr, clf): ROC curve points for the positive class (label 1)
    plus the fitted classifier.
    """
    if isinstance(clf, lgb.LGBMClassifier):
        clf.fit(X=X_train, y=Y_train, feature_name=feature_names)
    else:
        clf.fit(X=X_train, y=Y_train)
    proba = clf.predict_proba(X_test)
    print('{} score = {}'.format(clf.__class__.__name__, clf.score(X=X_test, y=Y_test)))
    fpr, tpr, _ = roc_curve(Y_test, proba[:, 1], drop_intermediate=False, pos_label=1)
    return fpr, tpr, clf


def lgb_feature_importance(clf):
    """Show a bar plot of the top-15 LightGBM feature importances."""
    _, axis = plt.subplots(figsize=(16, 10))
    lgb.plot_importance(clf, max_num_features=15, ax=axis)
    plt.title('feature importance sorted by LightGBM')
    plt.show()


def xgb_feature_importance(clf: xgb.XGBClassifier):
    """Show a bar plot of XGBoost feature importances with readable names."""
    _, axis = plt.subplots(figsize=(16, 10))
    # attach human-readable names so the plot doesn't show f0, f1, ...
    booster = clf.get_booster()
    booster.feature_names = feature_names
    xgb.plot_importance(booster=booster, ax=axis)
    plt.title('feature importance sorted by XGBoost')
    plt.show()


def rf_feature_importance(clf):
    """Print RandomForest feature importances paired with feature names."""
    if not isinstance(clf, RandomForestClassifier):
        return
    print("Feature importance ranking by rf:")
    ranked = sorted(zip((round(v, 4) for v in clf.feature_importances_), feature_names))
    print(ranked)


def svm_feature_importance(clf):
    """Print SVM feature importances derived from summed |coef_| per feature.

    ``coef_`` is only defined when the SVC uses ``kernel='linear'`` — the
    sklearn default is 'rbf', where accessing it raises AttributeError.
    Non-linear SVCs are skipped with a notice instead of crashing.
    """
    if not isinstance(clf, svm.SVC):
        return
    if not hasattr(clf, 'coef_'):
        # kernel != 'linear': no per-feature weights exist to rank.
        print("SVM feature importance requires kernel='linear'; skipped.")
        return
    # importance of each feature = sum of absolute weights across classes
    importance = np.sum(np.abs(clf.coef_), axis=0)
    print("Feature importance ranking by svm:")
    for i in np.argsort(importance)[::-1]:
        print(" %s (%f)" % (feature_names[i], importance[i]))


def roc(names, fprs, tprs, colors):
    """Plot one ROC curve per model on a shared figure and return pyplot.

    Parameters are parallel sequences: model names, false-positive rates,
    true-positive rates, and line colors. The legend label of each curve
    includes its AUC.
    """
    plt.figure(figsize=(10, 10))
    for name, fpr, tpr, color in zip(names, fprs, tprs, colors):
        plt.plot(fpr, tpr, lw=3, label='{} (AUC={:.3f})'.format(name, auc(fpr, tpr)), color=color)
    # The chance diagonal and axis cosmetics are loop-invariant: draw them
    # once instead of re-plotting the dashed line for every model.
    plt.plot([0, 1], [0, 1], '--', lw=5, color='grey')
    plt.axis('square')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel('False Positive Rate', fontsize=20)
    plt.ylabel('True Positive Rate', fontsize=20)
    plt.title('ROC Curve', fontsize=25)
    plt.legend(loc='lower right', fontsize=20)

    return plt


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    generate_dataset(file_name="normal.csv", data_num=19, label=0)
    print('normal dataset generated!')
    generate_dataset(file_name="sick.csv", data_num=400, label=1)
    print('sick dataset generated!')

    # Split each row into its 15 feature values and the trailing label.
    X = [row[:15] for row in dataset]
    Y = [row[15] for row in dataset]
    X, Y = shuffle(X, Y, random_state=36)

    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=36)
    dataset_to_csv()

    fprs = []
    tprs = []
    names = []
    colors = ['crimson', 'orange', 'green', 'blue']
    # Train every configured model; show the matching importance report.
    for name, member in Model.__members__.items():
        names.append(name)
        fpr, tpr, clf = train(member.value)
        fprs.append(fpr)
        tprs.append(tpr)
        if name == Model.LightGBM.name:
            lgb_feature_importance(clf)
        elif name == Model.XGBoost.name:
            xgb_feature_importance(clf)
        elif name == Model.RandomForest.name:
            rf_feature_importance(clf)
        elif name == Model.SVM.name:
            svm_feature_importance(clf)
    # roc(names, fprs, tprs, colors).show()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
