import matplotlib as mpl
import matplotlib.pyplot as plt

from sklearn.datasets import make_gaussian_quantiles
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score #计算roc和auc

## Configure matplotlib fonts so the Chinese axis labels below render
## correctly instead of as mojibake (SimHei is a common CJK sans-serif;
## assumes it is installed on this machine — TODO confirm).
mpl.rcParams['font.sans-serif']=[u'simHei']
mpl.rcParams['axes.unicode_minus']=False

def run(n_samples=13000, n_split=3000, n_estimators=600, learning_rate=1, random_state=1):
    """Compare discrete (SAMME) vs real (SAMME.R) AdaBoost boosting.

    Fits two AdaBoost ensembles of depth-2 decision trees on synthetic
    Gaussian-quantile data and draws three panels:
      1. staged test error of both algorithms vs number of weak learners,
      2. per-estimator training error of both algorithms,
      3. per-estimator weight of the SAMME ensemble
         (SAMME.R weights are uninformative, so only SAMME is shown).

    Parameters
    ----------
    n_samples : int, default 13000
        Total number of synthetic samples to generate.
    n_split : int, default 3000
        The first ``n_split`` samples train; the remainder test.
    n_estimators : int, default 600
        Number of boosting rounds for each ensemble.
    learning_rate : float, default 1
        Shrinkage applied to each weak learner's contribution.
    random_state : int, default 1
        Seed for the synthetic data generator.
    """
    # Synthetic 3-class dataset drawn from nested Gaussian quantiles.
    X, Y = make_gaussian_quantiles(n_samples=n_samples, n_features=10,
                                   n_classes=3, random_state=random_state)
    X_train, X_test = X[:n_split], X[n_split:]
    Y_train, Y_test = Y[:n_split], Y[n_split:]

    # Two ensembles identical except for the boosting algorithm.
    # NOTE(review): 'SAMME.R' was deprecated in scikit-learn 1.4 and removed
    # in 1.6 — confirm the installed version still supports it.
    abc_real = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),
                                  n_estimators=n_estimators,
                                  learning_rate=learning_rate,
                                  algorithm='SAMME.R')
    abc_discrete = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),
                                      n_estimators=n_estimators,
                                      learning_rate=learning_rate,
                                      algorithm='SAMME')
    abc_real.fit(X_train, Y_train)
    abc_discrete.fit(X_train, Y_train)

    # Cumulative test error after each boosting round (staged_predict yields
    # the ensemble prediction using the first k estimators, k = 1..n).
    # accuracy_score signature is (y_true, y_pred) — y_true goes first.
    real_test_errors = []
    discrete_test_errors = []
    for real_stage_pred, discrete_stage_pred in zip(
            abc_real.staged_predict(X_test),
            abc_discrete.staged_predict(X_test)):
        real_test_errors.append(1 - accuracy_score(Y_test, real_stage_pred))
        discrete_test_errors.append(1 - accuracy_score(Y_test, discrete_stage_pred))

    # Per-estimator training error and (for SAMME) per-estimator weight.
    n_trees_discrete = len(abc_discrete)
    n_trees_real = len(abc_real)
    real_estimator_errors = abc_real.estimator_errors_[:n_trees_real]
    discrete_estimator_errors = abc_discrete.estimator_errors_[:n_trees_discrete]
    discrete_estimator_weight = abc_discrete.estimator_weights_[:n_trees_discrete]

    plt.figure(figsize=(15, 5), facecolor='w')

    # Panel 1: staged test error of both algorithms.
    plt.subplot(1, 3, 1)
    plt.plot(range(1, n_trees_real + 1), real_test_errors, 'r',
             linestyle='dashed', label='SAMME.R')
    plt.plot(range(1, n_trees_discrete + 1), discrete_test_errors, 'g',
             label='SAMME')
    plt.legend()
    plt.ylim(0.18, 0.62)
    plt.ylabel(u'测试数据的预测错误率')
    plt.xlabel(u'弱分类器数目')

    # Panel 2: each weak learner's weighted training error.
    plt.subplot(1, 3, 2)
    plt.plot(range(1, n_trees_real + 1), real_estimator_errors, 'r',
             label='SAMME.R', alpha=.5)
    plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors, 'b',
             label='SAMME', alpha=.5)
    plt.legend()
    plt.ylabel(u'模型实际错误率')
    plt.xlabel(u'弱分类器数目')
    plt.ylim((.2, max(real_estimator_errors.max(),
                      discrete_estimator_errors.max()) * 1.2))
    plt.xlim((-20, n_trees_discrete + 20))

    # Panel 3: SAMME per-estimator weights.
    plt.subplot(1, 3, 3)
    plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weight, 'b',
             label='SAMME')
    plt.legend()
    plt.ylabel(u'权重')
    plt.xlabel(u'弱分类器编号')
    plt.xlim((-20, n_trees_discrete + 20))
    plt.ylim((0, discrete_estimator_weight.max() * 1.2))

    # Show all three panels.
    plt.subplots_adjust(wspace=0.25)
    plt.show()


run()