import numpy as np
import pandas as pd
from dask.distributed import Client,LocalCluster
import matplotlib.pyplot as plt, seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, recall_score, f1_score
import time
from sklearn.impute import SimpleImputer
from imblearn.over_sampling import SMOTE
import dask.dataframe as dd
import joblib
def run():
    """Train and evaluate a RandomForest fraud detector on the credit-card dataset.

    Loads ./dataset/creditcard.csv, splits into train/test (stratified),
    scales 'Time'/'Amount' with RobustScaler fitted on the training split
    only, rebalances the training set with SMOTE, fits a
    RandomForestClassifier via the joblib Dask backend on a local Dask
    cluster, then prints accuracy/recall/F1, a classification report, a
    confusion matrix, and shows a heatmap of the confusion matrix.
    """
    start = time.time()

    ds = pd.read_csv('./dataset/creditcard.csv')

    # Features = all columns but the last; target = last column ('Class').
    X = ds.iloc[:, :-1]
    y = ds.iloc[:, -1]

    # stratify=y maintains the fraud proportion in both train and test sets.
    X_train, x_test, y_train, y_test = train_test_split(
        X, y, random_state=3, train_size=.9, stratify=y)

    # Scale 'Time' and 'Amount' with RobustScaler (IQR-based, reduces the
    # influence of outliers). Fit on the TRAINING split only and apply the
    # fitted transform to the test split: fitting on the full dataset before
    # splitting (as the original code did) leaks test-set statistics into
    # training. One fresh scaler per column.
    X_train = X_train.copy()
    x_test = x_test.copy()
    for col in ('Amount', 'Time'):
        scaler = RobustScaler()
        X_train[col] = scaler.fit_transform(X_train[[col]])
        x_test[col] = scaler.transform(x_test[[col]])

    # Oversample the minority (fraud) class so the forest trains on
    # balanced data; only the training split is resampled.
    smote = SMOTE(random_state=3)
    X_train_resampled, y_train_resampled = smote.fit_resample(X_train, y_train)
    print(X_train_resampled.shape)

    # Model training and prediction on a local Dask cluster. The context
    # manager guarantees the client (and its implicit LocalCluster) are
    # shut down even if training raises — the original leaked them because
    # client.close() was commented out. The original's client.restart()
    # on a freshly created client was a no-op and is dropped.
    clf = RandomForestClassifier(n_estimators=100, criterion='gini', n_jobs=4)
    with Client():
        with joblib.parallel_backend("dask"):
            clf.fit(X_train_resampled, y_train_resampled)
            y_pred = clf.predict(x_test)
    end_time = time.time() - start

    # Evaluation metrics (macro averages treat both classes equally, which
    # matters for the rare fraud class).
    accuracy = accuracy_score(y_test, y_pred)
    print(f"RandomForest 准确率:{accuracy * 100:.3f}%")
    recall = recall_score(y_test, y_pred, average='macro')
    print(f"RandomForest 召回率:{recall * 100:.3f}%")
    f1 = f1_score(y_test, y_pred, average='macro')
    print(f"RandomForest F1:{f1 * 100:.3f}%")
    print(f"used time:{end_time}")

    report = classification_report(y_test, y_pred)
    cm = confusion_matrix(y_test, y_pred)
    print("RandomForest Classification Report:\n")
    print(report)
    print("RandomForest Confusion Matrix:\n")
    print(cm)

    # Heatmap of the confusion matrix for visual inspection of the results.
    cm = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
    fig, (ax1) = plt.subplots(ncols=1, figsize=(5, 5))
    sns.heatmap(cm,
                xticklabels=['Not Fraud', 'Fraud'],
                yticklabels=['Not Fraud', 'Fraud'],
                annot=True, ax=ax1,
                linewidths=.2, linecolor="Darkblue", cmap="Blues")
    plt.title('Confusion Matrix', fontsize=14)
    plt.show()

# Script entry point: run the full pipeline only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    run()