import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split

# Load the breast-cancer dataset: a Bunch whose .data holds the feature
# matrix and .target the binary class labels, used by all experiments below.
datas = load_breast_cancer()
# print(datas)

# Simple baseline model for an initial evaluation
# rfc = RandomForestClassifier(random_state=10, n_estimators=100)
# score_pre = cross_val_score(rfc, datas.data, datas.target, cv=10).mean()
# print(score_pre)

# Tune n_estimators
# scorel = []
# First pick a rough range and a large step — this saves time
# for i in range(1, 201, 10):
#     rfc = RandomForestClassifier(n_estimators=i, random_state=90)
#     score_pre = cross_val_score(rfc, datas.data, datas.target, cv=10).mean()
#     scorel.append(score_pre)
#
# plt.figure(figsize=[20, 5])
# plt.plot(range(1, 201, 10), scorel)
# plt.show()
#
# print(max(scorel), (scorel.index(max(scorel)) * 10))

# The results show an optimum somewhere around n_estimators=40
# for i in range(35, 51):
#     rfc = RandomForestClassifier(n_estimators=i, random_state=90)
#     score_pre = cross_val_score(rfc, datas.data, datas.target, cv=10).mean()
#     scorel.append(score_pre)
#
# plt.figure(figsize=[20, 5])
# plt.plot(range(35, 51), scorel)
# plt.show()
#
# print(max(scorel), (scorel.index(max(scorel))))

# Conclusion: n_estimators=45 is the best value
#
# # For parameters with no obvious prior, use grid search to find a rough range, then narrow it down and plot curves
# param_grid = {'n_estimators': np.arange(1, 201, 10)}
param_grid = {'max_depth': np.arange(1, 20, 1)}
# param_grid = {'max_leaf_nodes': np.arange(25, 50, 1)}
# # For large datasets, try starting the range around 1000 and narrow it down gradually
#
# # Parameters whose rough value range is known can be grid-searched directly
# param_grid = {'criterion': ['gini', 'entropy']}
# param_grid = {'min_samples_split': np.arange(2, 2 + 20, 1)}
# param_grid = {'min_samples_leaf': np.arange(1, 1 + 10, 1)}
# param_grid = {'max_features': np.arange(5, 30, 1)}

# rfc = RandomForestClassifier(n_estimators=45, random_state=10)
# gsc = GridSearchCV(rfc, param_grid, cv=10).fit(datas.data, datas.target)
# print(gsc.best_params_)
# print(gsc.best_score_)
# Observation: limiting max_depth lowers accuracy. Setting overfitting concerns aside, accuracy is what matters here, so the tree depth is left unrestricted.

# Final model: hold out 30% of the data, train with the tuned n_estimators=45,
# and report mean accuracy on the held-out test split.
# (Fixes misleading local names: 'xtext' -> 'xtest', 'predict' -> 'accuracy' —
# the latter held a score, not predictions.)
xtrain, xtest, ytrain, ytest = train_test_split(datas.data, datas.target, test_size=0.3, random_state=10)
rfc = RandomForestClassifier(n_estimators=45, random_state=10)  # 45 chosen by the tuning runs above
rfc.fit(xtrain, ytrain)
accuracy = rfc.score(xtest, ytest)  # mean accuracy on the test set
print(accuracy)
