import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt

try:
    # Deprecated in scikit-learn 0.20 and removed in 0.22.
    from sklearn.preprocessing import Imputer
except ImportError:
    # Modern scikit-learn: SimpleImputer is the drop-in replacement for the
    # mean-fill usage in this script; keep the old name bound either way.
    from sklearn.impute import SimpleImputer as Imputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import label_binarize
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics

# Matplotlib setup: use the SimHei font so the Chinese axis labels and
# titles render, and disable the Unicode minus sign (SimHei lacks the
# glyph, so axes with negative ticks would show boxes otherwise).
mpl.rcParams.update({
    'font.sans-serif': [u'simHei'],
    'axes.unicode_minus': False,
})

def run():
    """Train a multi-output random forest on the cervical-cancer risk-factor
    data set, plot one ROC curve per diagnostic target, then compare error
    rates across different forest sizes and tree depths.

    Side effects: reads ../data/risk_factors_cervical_cancer.csv, prints
    progress to stdout and opens two matplotlib figure windows.
    """
    # Column names taken from df.columns; the last four are the targets.
    columns=[u'Age', u'Number of sexual partners', u'First sexual intercourse',
       u'Num of pregnancies', u'Smokes', u'Smokes (years)',
       u'Smokes (packs/year)', u'Hormonal Contraceptives',
       u'Hormonal Contraceptives (years)', u'IUD', u'IUD (years)', u'STDs',
       u'STDs (number)', u'STDs:condylomatosis',
       u'STDs:cervical condylomatosis', u'STDs:vaginal condylomatosis',
       u'STDs:vulvo-perineal condylomatosis', u'STDs:syphilis',
       u'STDs:pelvic inflammatory disease', u'STDs:genital herpes',
       u'STDs:molluscum contagiosum', u'STDs:AIDS', u'STDs:HIV',
       u'STDs:Hepatitis B', u'STDs:HPV', u'STDs: Number of diagnosis',
       u'STDs: Time since first diagnosis', u'STDs: Time since last diagnosis',
       u'Dx:Cancer', u'Dx:CIN', u'Dx:HPV', u'Dx', u'Hinselmann', u'Schiller',
       u'Citology', u'Biopsy']
    path = '../data/risk_factors_cervical_cancer.csv'  # data file path
    datas = pd.read_csv(path)
    X = datas[columns[0:-4]]   # feature columns
    Y = datas[columns[-4:]]    # the four diagnostic target columns

    # Missing values are encoded as '?' in the CSV. Coerce to float (turning
    # '?' into NaN) and fill each column with its mean — the same behaviour
    # as the removed sklearn Imputer(strategy='mean') this script once used.
    X = X.replace('?', np.nan).astype(np.float64)
    X = X.fillna(X.mean()).values

    # Train/test split
    X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.2,random_state=0)
    print("训练样本数量:%d,特征属性数目:%d,目标属性数目:%d" %(X_train.shape[0], X_train.shape[1], Y_train.shape[1]))
    print("测试样本数量:%d" % X_test.shape[0])

    # Scale features to [0, 1]. Min-max scaling is common for classifiers;
    # StandardScaler is more usual for regression models.
    mms = MinMaxScaler()
    X_train = mms.fit_transform(X_train)
    X_test = mms.transform(X_test)

    # Reduce to two principal components (fit on training data only).
    pca = PCA(n_components=2)
    X_train = pca.fit_transform(X_train)
    X_test = pca.transform(X_test)
    print(pca.explained_variance_ratio_)

    # Random forest of shallow stumps — max_depth is kept small so each
    # tree acts as a weak classifier.
    forest = RandomForestClassifier(n_estimators=100, criterion='gini',
                                    max_depth=1, random_state=0)
    forest.fit(X_train, Y_train)

    # Mean accuracy over all four targets.
    score = forest.score(X_test, Y_test)
    print("准确率:%.2f%%" % (score*100))

    # For a multi-output forest, predict_proba returns one probability
    # array per target, each of shape (n_samples, n_classes).
    Y_predict = forest.predict_proba(X_test)

    # ROC/AUC per target, all drawn on one figure.
    plt.figure(figsize=(8,6), facecolor='w')
    for name, proba, color in zip(columns[-4:], Y_predict, ['r', 'b', 'g', 'y']):
        # Binarizing a 0/1 label against classes (0,1,2) and dropping the
        # last column yields a two-column one-hot array that lines up with
        # the two-column probability output; ravel both to score jointly.
        y_true = label_binarize(Y_test[name], classes=(0,1,2)).T[0:-1].T.ravel()
        fpr, tpr, _ = metrics.roc_curve(y_true, proba.ravel())
        auc_value = metrics.auc(fpr, tpr)
        print("%s目标属性AUC值：" % name, auc_value)
        plt.plot(fpr, tpr, c=color, lw=2,
                 label=u'%s目标属性,AUC=%.3f' % (name, auc_value))
    plt.plot((0,1),(0,1),c='#a0a0a0',lw=2,ls='--')  # chance diagonal
    plt.xlim(-0.001,1.001)
    plt.ylim(-0.001,1.001)
    plt.xticks(np.arange(0,1.1,0.1))
    plt.yticks(np.arange(0,1.1,0.1))
    plt.xlabel('False Positive Rate(FPR)',fontsize=16)
    plt.ylabel('True Positive Rate(TPR)',fontsize=16)
    # Pass the flag positionally: the 'b=' keyword was removed in
    # matplotlib 3.6, while the positional form works on every version.
    plt.grid(True, ls=':')
    plt.legend(loc='lower right',fancybox=True,framealpha=0.8,fontsize=12)
    plt.title(u'随机森林多目标属性分类ROC曲线',fontsize=18)
    plt.show()

    # Compare accuracy for different tree counts / maximum depths.
    # The usual starting point is 100 trees of depth 1, tuned from there.
    # NOTE(review): this sweep uses the imputed but un-scaled, un-projected
    # features (X), unlike the model above — presumably intentional.
    X_train2,X_test2,Y_train2,Y_test2=train_test_split(X,Y,test_size=0.5,random_state=0)
    print("训练样本数量%d，测试样本数量:%d" % (X_train2.shape[0],X_test2.shape[0]))
    estimates = [1, 50, 100, 500]   # forest sizes to try
    depth = [1, 2, 3, 7, 15]        # max depths to try
    err_list = []                   # err_list[i][j]: error at estimates[i], depth[j]
    for es in estimates:
        es_list = []
        for d in depth:
            tf = RandomForestClassifier(n_estimators=es, criterion='gini',
                                        max_depth=d, max_features=None,
                                        random_state=0)
            tf.fit(X_train2, Y_train2)
            st = tf.score(X_test2, Y_test2)
            es_list.append(1 - st)  # error rate = 1 - accuracy
            print("%d决策树数目，%d最大深度，正确率:%.2f%%" % (es,d,st*100))
        err_list.append(es_list)

    # Error-rate curves: one line per forest size, x-axis is tree depth.
    plt.figure(facecolor='w')
    for es, err, color, width in zip(estimates, err_list,
                                     ['r', 'b', 'g', 'y'], [1, 2, 4, 3]):
        plt.plot(depth, err, c=color, lw=width, label=u'树数目:%d' % es)
    max_err = max(max(e) for e in err_list)
    min_err = min(min(e) for e in err_list)
    plt.xlim(min(depth), max(depth))
    plt.ylim(min_err*0.99, max_err*1.01)
    plt.xlabel(u'树深度',fontsize=16)
    plt.ylabel(u'错误率',fontsize=16)
    plt.legend(loc='upper left',fancybox=True,framealpha=0.8,fontsize=12)
    plt.grid(True)
    plt.title(u'随机森林中树数目、深度和错误率的关系图',fontsize=18)
    plt.show()


run()