import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LassoCV, LinearRegression, LogisticRegression
from sklearn.metrics import (accuracy_score, mean_absolute_error,
                             mean_squared_error, precision_score, r2_score,
                             recall_score, roc_auc_score)
from sklearn.model_selection import (GridSearchCV, KFold, StratifiedKFold,
                                     cross_val_score, cross_validate,
                                     train_test_split)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PowerTransformer, StandardScaler
import warnings
# NOTE(review): this silences *every* warning globally, including sklearn
# deprecation warnings that would flag real problems (e.g. removed parameter
# values). Consider narrowing with category=/module= filters instead.
warnings.filterwarnings('ignore')

# ---------------------------------------------------------------
# Dataset: "Give Me Some Credit" — binary target SeriousDlqin2yrs.
# Paths are absolute; adjust to the local checkout as needed.
# ---------------------------------------------------------------
train_df = pd.read_csv(r'/SACN.pytorch/data/user_train.csv')
test_df = pd.read_csv(r'/SACN.pytorch/data/user_test.csv')

target = 'SeriousDlqin2yrs'
X, y = train_df.drop(columns=[target]), train_df[target]
W, z = test_df.drop(columns=[target]), test_df[target]


# Alternative dataset: UCI_Credit_Card (kept disabled)
'''
train_df = pd.read_csv(r'D:/based-saafm/SACN.pytorch/data/UCI_Credit_Card_train.csv')
test_df = pd.read_csv(r'D:/based-saafm/SACN.pytorch/data/UCI_Credit_Card_test.csv')

X = train_df.drop(['default.payment.next.month'],axis=1)
y = train_df['default.payment.next.month']
W = test_df.drop(['default.payment.next.month'],axis=1)
z = test_df['default.payment.next.month']
'''

# 3-fold stratified CV for the grid search; shuffled, with no fixed seed,
# so fold assignment varies from run to run.
cv = StratifiedKFold(n_splits=3, shuffle=True)

# Hold-out split (sklearn default 75/25). random_state=None keeps the
# original non-reproducible behaviour.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=None)

# View classification model performance
def perfomance_clf(model, X, y, name=None):
    """Print accuracy, precision, recall and ROC-AUC of `model` on (X, y).

    Parameters
    ----------
    model : fitted estimator (e.g. a sklearn Pipeline) exposing `predict`
        and, ideally, `predict_proba`.
    X : feature matrix.
    y : true binary labels.
    name : optional header printed before the metrics (e.g. 'train'/'test').
    """
    y_predict = model.predict(X)
    if name:
        print(name, ':')
    print(f'accuracy score is: {accuracy_score(y, y_predict)}')
    print(f'precision score is: {precision_score(y, y_predict)}')
    print(f'recall score is: {recall_score(y, y_predict)}')
    # BUG FIX: ROC-AUC must be computed from continuous scores, not hard 0/1
    # predictions — label-based AUC collapses the ranking information and
    # systematically understates the model's discriminative power.
    if hasattr(model, 'predict_proba'):
        y_score = model.predict_proba(X)[:, 1]  # P(class = 1)
    else:
        y_score = y_predict  # fallback: original (label-based) behaviour
    print(f'auc: {roc_auc_score(y, y_score)}')
    print('- - - - - - ')


# ---- Random-forest classifier with grid-searched hyper-parameters ----
rf_clf = RandomForestClassifier(criterion='gini',
                                n_jobs=-1,
                                n_estimators=1000)    # random_state left unset (original behaviour)
# Hyper-parameter grid.
# BUG FIX: max_features='auto' was deprecated and removed in scikit-learn
# >= 1.3 (fit raises); 'sqrt' is the exact classifier equivalent.
rf_grid_params = {'max_features': ['sqrt', 0.5, 0.6, 0.9],
                  'max_depth': [6, 9]}    # reduced grid; full grid was [3,6,9]
# Grid search over the parameter grid, selecting by ROC-AUC.
rf_gridsearch = GridSearchCV(rf_clf, rf_grid_params, cv=cv,
                             n_jobs=-1, scoring='roc_auc', verbose=10, refit=True)
# Preprocessing + model-selection pipeline: z-score standardisation,
# then a power transform to reduce skew, then the grid search itself.
pipe_rf = Pipeline([
        ('sc', StandardScaler()),
        ('pow_trans', PowerTransformer()),
        ('log_grid', rf_gridsearch)
        ])
# Search for parameters and fit the final model.
pipe_rf.fit(X_train, y_train)
# Best parameter combination found by the grid search.
print(pipe_rf.named_steps['log_grid'].best_params_)
# Performance metrics on the training / held-out splits.
perfomance_clf(pipe_rf, X_train, y_train, name='train')
perfomance_clf(pipe_rf, X_test, y_test, name='test')

# Cross-validated average scores.
# PERF FIX: the original called cross_val_score four times, refitting the
# expensive grid-search pipeline 3 folds x 4 runs; cross_validate computes
# all three scorings in a single 3-fold pass.
cv_scores = cross_validate(pipe_rf, X_train, y_train, cv=3,
                           scoring=['accuracy', 'roc_auc', 'f1'])
print('random_forest_acc:', cv_scores['test_accuracy'].mean())
print('random_forest_max_auc:', cv_scores['test_roc_auc'].max())
print('random_forest_auc:', cv_scores['test_roc_auc'].mean())
print('random_forest_F1:', cv_scores['test_f1'].mean())