from collections import Counter

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from imblearn.over_sampling import SMOTE
from pandas import DataFrame
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split

# Load the pre-processed loan data; "loan_status" is the binary target column.
df = pd.read_csv("data/0_1.csv")
x_columns = [x for x in df.columns if x != "loan_status"]
X = df[x_columns]
y = df['loan_status']

# Rebalance the classes in the training split with SMOTE oversampling.
# NOTE(review): test_size=0.8 keeps only 20% of the rows for training —
# confirm this is intentional and not a typo for 0.2.
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, y, test_size=0.8)
smo = SMOTE(random_state=42)
# BUG FIX: SMOTE.fit_sample() was deprecated in imbalanced-learn 0.4 and
# removed in 0.6; the supported API is fit_resample().
Xtrain, Ytrain = smo.fit_resample(Xtrain, Ytrain)

# Sanity check: after SMOTE both classes should have equal sample counts.
count = dict(sorted(Counter(Ytrain).items()))
print(count)  # {0: 3904, 1: 3904}

# rfc = RandomForestClassifier(n_estimators=100, random_state=90)
# score_pre = cross_val_score(rfc, Xtrain, Ytrain, cv=10).mean()
# print(score_pre)  # 0.8996153846153845 相比未经处理的数据已经有明显的提高

# Coarse tuning of n_estimators; values around 150 - 170 work best
# scorel = []
# for i in range(10, 510, 10):
#     print("trees number : {}".format(i))
#     rfc = RandomForestClassifier(n_estimators=i, n_jobs=-1, random_state=90)
#     score = cross_val_score(rfc, Xtrain, Ytrain, cv=10).mean()
#     scorel.append(score)
# plt.figure(figsize=[20, 5])
# plt.plot(range(10, 510, 10), scorel)
# print(max(scorel))
# plt.show()

# Coarse tuning of max_depth; the optimum lies in 15 - 20
# scorel = []
# for i in range(10, 30):
#     print("max_depth : {}".format(i))
#     rfc = RandomForestClassifier(n_estimators=170, n_jobs=-1, random_state=90, max_depth=i)
#     score = cross_val_score(rfc, Xtrain, Ytrain, cv=10).mean()
#     scorel.append(score)
# print(max(scorel))
# plt.figure(figsize=[20, 5])
# plt.plot(range(10, 30), scorel)
# plt.show()

# Coarse tuning of max_features; optimum lies between 3 and 8
# scorel = []
# for i in range(1, 13):
#     print("max_features : {}".format(i))
#     rfc = RandomForestClassifier(n_estimators=170,
#                                  n_jobs=-1,
#                                  random_state=90,
#                                  max_depth=7,
#                                  max_leaf_nodes=5,
#                                  max_features=i)
#     score = cross_val_score(rfc, Xtrain, Ytrain, cv=10).mean()
#     scorel.append(score)
# print(max(scorel))
# plt.figure(figsize=[20, 5])
# plt.plot(range(1, 13), scorel)
# plt.show()

# max_leaf_nodes tuning: with only two classes there is no need to tune it

# After coarse tuning narrowed each parameter to a rough interval, a grid search
# finds the best combination. Grid search is very time-consuming — use with care.
# {'max_depth': 20, 'max_features': 7, 'n_estimators': 157}
# 0.9115673976110598

# param_grid = {
#     'n_estimators': np.arange(100, 200, 1),
#     'max_depth': np.arange(15, 21, 1),
#     'max_features': np.arange(3, 9, 1),
# }
# rfc = RandomForestClassifier(random_state=90)
# GS = GridSearchCV(rfc, param_grid, cv=10, n_jobs=-1)
# GS.fit(Xtrain, Ytrain)
# print(GS.best_params_)
# print(GS.best_score_)


# Final model: random forest with the hyper-parameters found by the grid
# search above ({'max_depth': 20, 'max_features': 7, 'n_estimators': 157}).
rfc = RandomForestClassifier(
    n_estimators=157,
    max_depth=20,
    max_features=7,
    n_jobs=-1,
    random_state=90,
)

# Mean 10-fold cross-validated accuracy on the (SMOTE-resampled) training set.
cv_scores = cross_val_score(rfc, Xtrain, Ytrain, cv=10)
score = cv_scores.mean()
print("训练集评分:", score)

# Refit on the full training split and score the held-out test split.
rfc.fit(Xtrain, Ytrain)
print("测试集评分:", rfc.score(Xtest, Ytest))
