import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt

# Load the prepared loan dataset.
df = pd.read_csv("data/0_1.csv")

# Every column except the target is a feature.
x_columns = [x for x in df.columns if x != "loan_status"]

X = df[x_columns]
y = df['loan_status']

# FIX: test_size was 0.8, which held out 80% and trained on only 20% of the
# data — almost certainly inverted; hold out 20% instead. Also pin
# random_state (matching the random_state=90 used by every estimator below,
# so the scores recorded in the comments are reproducible) and stratify on y,
# because the classes are heavily imbalanced (19588 vs 2861, printed below).
Xtrain, Xtest, Ytrain, Ytest = train_test_split(
    X, y, test_size=0.2, random_state=90, stratify=y
)

# First, run a simple baseline test
# rfc = RandomForestClassifier(n_estimators=100, random_state=90)
# score_pre = cross_val_score(rfc, Xtrain, Ytrain, cv=10).mean()
# print(score_pre)  # score: 0.8685680480432707

# # Coarse tuning of n_estimators; 100-150 turns out to be a good range
# scorel = []
# for i in range(10, 510, 10):
#     print("trees number : {}".format(i))
#     rfc = RandomForestClassifier(n_estimators=i, n_jobs=-1, random_state=90)
#     score = cross_val_score(rfc, Xtrain, Ytrain, cv=10).mean()
#     scorel.append(score)
# plt.figure(figsize=[20, 5])
# plt.plot(range(10, 510, 10), scorel)
# plt.xlabel('trees')
# plt.ylabel('accuracy rate')
# plt.title("ntree_fit")
# print(max(scorel))
# plt.show()

# Coarse tuning of max_depth; there are only 13 labels (columns), the optimum is 6-8
# scorel = []
# for i in range(1, 13):
#     print("max_depth : {}".format(i))
#     rfc = RandomForestClassifier(n_estimators=170, n_jobs=-1, random_state=90, max_depth=i)
#     score = cross_val_score(rfc, Xtrain, Ytrain, cv=10).mean()
#     scorel.append(score)
# plt.figure(figsize=[20, 5])
# plt.plot(range(1, 13), scorel)
# plt.xlabel('tree_depth')
# plt.ylabel('accuracy rate')
# plt.title("depth_fit")
# plt.show()

#  Coarse tuning of max_features; the resulting curve is flat, so the default value is fine
# scorel = []
# for i in range(1, 13):
#     print("max_features : {}".format(i))
#     rfc = RandomForestClassifier(n_estimators=170,
#                                  n_jobs=-1,
#                                  random_state=90,
#                                  max_depth=7,
#                                  max_leaf_nodes=5,
#                                  max_features=i)
#     score = cross_val_score(rfc, Xtrain, Ytrain, cv=10).mean()
#     scorel.append(score)
# plt.figure(figsize=[20, 5])
# plt.plot(range(1, 13), scorel)
# plt.xlabel('tree_features')
# plt.ylabel('accuracy rate')
# plt.title("features_fit")
# plt.show()

# max_leaf_nodes tuning: there are only two classes, so there is no need to tune it

# Grid search for the best parameters
"""
有一些参数是没有参照的，很难说清一个范围，这种情况下我们使用学习曲线，看趋势
从曲线跑出的结果中选取一个更小的区间，再跑曲线
param_grid = {'n_estimators':np.arange(0, 200, 10)}
param_grid = {'max_depth':np.arange(1, 20, 1)}
 
param_grid = {'max_leaf_nodes':np.arange(25,50,1)}
对于大型数据集，可以尝试从1000来构建，先输入1000，每100个叶子一个区间，再逐渐缩小范围
有一些参数是可以找到一个范围的，或者说我们知道他们的取值和随着他们的取值，模型的整体准确率会如何变化，这
样的参数我们就可以直接跑网格搜索
param_grid = {'criterion':['gini', 'entropy']}
param_grid = {'min_samples_split':np.arange(2, 2+20, 1)}
param_grid = {'min_samples_leaf':np.arange(1, 1+10, 1)}
param_grid = {'max_features':np.arange(5,30,1)}
"""

# After coarse tuning, we have the rough interval of the best parameters; now use grid search to find the optimal values
# param_grid = {
#     'n_estimators': np.arange(100, 150, 1),
#     'max_depth': np.arange(5, 10, 1),
# }
# rfc = RandomForestClassifier(random_state=90)
# GS = GridSearchCV(rfc, param_grid, cv=10)
# GS.fit(Xtrain, Ytrain)
# print(GS.best_params_)
# print(GS.best_score_)

# After tuning, the score did not improve noticeably; analysis suggests
# this is caused by the class imbalance in the data.
print(y.value_counts())
# 1    19588
# 0     2861
# NOTE(review): given this imbalance, plain accuracy is a weak metric here —
# consider class_weight='balanced' plus precision/recall; to be confirmed.


# Final model with the hyper-parameters found during coarse tuning.
final_params = dict(
    n_estimators=157,
    n_jobs=-1,
    random_state=90,
    max_depth=20,
    max_features=7,
)
rfc = RandomForestClassifier(**final_params)

# 10-fold cross-validated accuracy on the training split.
cv_scores = cross_val_score(rfc, Xtrain, Ytrain, cv=10)
score = cv_scores.mean()
print("训练集评分:", score)  # training-set score; previously observed 0.8703502823735285

# Refit on the full training split and evaluate on the hold-out set.
rfc.fit(Xtrain, Ytrain)
print("测试集评分:", rfc.score(Xtest, Ytest))  # test-set score; previously observed 0.8711024498886414
