import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import numpy as np
from matplotlib import pyplot as plt
import warnings

# Globally silence library warnings (e.g. sklearn/pandas deprecation notices)
# so the tutorial output stays readable.
warnings.filterwarnings('ignore')

def read_dataset(fname):
    """Load the Titanic CSV and convert it into a purely numeric frame.

    Parameters
    ----------
    fname : str, path-like or file-like
        CSV source whose first column (PassengerId) is the row index.

    Returns
    -------
    pandas.DataFrame
        'Name'/'Ticket'/'Cabin' dropped; 'Sex' encoded 1 (male) / 0
        (female); 'Embarked' encoded as integer codes in order of first
        appearance (missing ports get their own code); remaining NaNs
        (e.g. Age) replaced by 0.
    """
    # Use the first column as the row index.
    data = pd.read_csv(fname, index_col=0)
    # Drop free-text columns a decision tree cannot use directly.
    data.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
    # Encode sex: male -> 1, female -> 0.
    data['Sex'] = (data['Sex'] == 'male').astype('int')
    # Give missing ports an explicit sentinel category BEFORE building the
    # label list: the original relied on labels.index(nan), which only
    # matches NaN by object identity and breaks when the NaN objects differ.
    data['Embarked'] = data['Embarked'].fillna('U')
    labels = data['Embarked'].unique().tolist()
    data['Embarked'] = data['Embarked'].apply(lambda n: labels.index(n))
    # Fill any remaining missing values with 0.
    data = data.fillna(0)
    return data
    
# Load the training data and separate the target label from the features.
train = read_dataset('train.csv')
y = train['Survived'].values                      # labels as an ndarray
X = train.drop(['Survived'], axis=1).values       # feature matrix as an ndarray

# Hold out 20% of the rows for evaluation.
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)
print('train dataset:{0};test dataset:{1}'.format(X_train.shape, X_test.shape))

# Fit an unconstrained decision tree and score it on both splits.
clf = DecisionTreeClassifier()
clf.fit(X_train, Y_train)
train_score = clf.score(X_train, Y_train)
test_score = clf.score(X_test, Y_test)
print('\ntrain score: {0} ; test score : {1}'.format(train_score, test_score))

'''
train score: 0.9873595505617978 ; test score : 0.7653631284916201
可知 模型过拟合
对决策树进行剪枝
scikit-learn不支持后剪枝 下面通过max_depth参数限定决策树的深度
'''

# Hyper-parameter selection: max_depth
def cv_score(d):
    """Fit a depth-limited tree on the module-level split.

    d : maximum tree depth.
    Returns a (train_score, test_score) accuracy pair.
    """
    model = DecisionTreeClassifier(max_depth=d)
    model.fit(X_train, Y_train)
    return (model.score(X_train, Y_train),
            model.score(X_test, Y_test))

# Evaluate every candidate depth and split the score pairs apart.
depths = range(2, 15)
scores = [cv_score(d) for d in depths]
tr_scores = [pair[0] for pair in scores]
cv_scores = [pair[1] for pair in scores]

# Locate the depth whose cross-validation score is highest
# (first maximum wins, matching np.argmax semantics).
best_score_index = max(range(len(cv_scores)), key=cv_scores.__getitem__)
best_score = cv_scores[best_score_index]
best_param = depths[best_score_index]
print('\nbest param:{0}; best score:{1}'.format(
        best_param, best_score))

# Visualize score versus tree depth.
plt.figure(figsize=(6, 4), dpi=80)
plt.grid()
plt.xlabel('max depth of decision trees')
plt.ylabel('score')
plt.plot(depths, cv_scores, 'g-', label='cross-validation score')
plt.plot(depths, tr_scores, 'r--', label='training score')
plt.legend()
plt.show()

'''
接下来考察min_impurity_split参数
这个参数指定信息熵或基尼不纯度的阈值
'''

# Train the model for a given impurity threshold and compute its scores.
def cv_score(val):
    """Fit a gini tree pruned by an impurity-decrease threshold.

    val : value for min_impurity_decrease. (min_impurity_split was
          deprecated in scikit-learn 0.23 and removed in 1.0;
          min_impurity_decrease is the supported replacement.)
    Returns a (train_score, test_score) accuracy pair computed on the
    module-level train/test split.
    """
    clf = DecisionTreeClassifier(criterion='gini', min_impurity_decrease=val)
    clf.fit(X_train, Y_train)
    tr_score = clf.score(X_train, Y_train)
    # Renamed from 'cv_score' to avoid shadowing this function's own name.
    te_score = clf.score(X_test, Y_test)
    return (tr_score, te_score)

# Train a model for each candidate threshold and collect the scores.
values = np.linspace(0, 0.5, 50)
scores = [cv_score(v) for v in values]
tr_scores = [s[0] for s in scores]
cv_scores = [s[1] for s in scores]

# Find the parameter with the best cross-validation score.
# BUG FIX: the original assigned misspelled names (best_socre /
# best_pararm) and then printed a STALE best_score left over from the
# depth sweep, with param and score swapped in the output.
best_score_index = np.argmax(cv_scores)
best_score = cv_scores[best_score_index]
best_param = values[best_score_index]
print('\nbest param:{0}; best score:{1}'.format(
        best_param, best_score))

# Plot the relationship between the threshold parameter and model score.
plt.figure(figsize=(10, 6), dpi=70)
plt.grid()
plt.xlabel('threshold of entropy')
plt.ylabel('score')
plt.plot(values, cv_scores, 'g-', label='cross-validation score')
plt.plot(values, tr_scores, 'r--', label='training score')
plt.legend()
# Missing in the original: without show() this figure is never rendered
# when the script runs non-interactively (the depth plot does call it).
plt.show()

'''
使用GridSearchCV选择一个参数的最优解
'''
from sklearn.model_selection import GridSearchCV

# Candidate impurity thresholds to search over.
thresholds = np.linspace(0, 0.5, 50)
# Parameter grid for GridSearchCV.
# NOTE: min_impurity_split was deprecated in scikit-learn 0.23 and removed
# in 1.0; min_impurity_decrease is the supported replacement.
param_grid = {'min_impurity_decrease': thresholds}

'''
关键参数 param_grid是一个字典
GridSearchCV会枚举列表中的所有值来构建模型，多次计算训练模型，计算模型评分
关键参数 cv 用来指定交叉验证数据集的生成规则
cv=5表示每次计算把数据结分成5份，其中的一份作为测试集，剩下的作为训练数据集
最终得到的最优参数和最优评分保存在clf.best_params_和clf.best_score_中
clf.cv_results_保存了计算过程的所有中间过程
'''
# return_train_score=True is required here: plot_curve() reads the
# mean/std *train* score arrays out of cv_results_, and GridSearchCV has
# not computed them by default since scikit-learn 0.21 (the original
# call would raise KeyError in plot_curve).
clf = GridSearchCV(DecisionTreeClassifier(), param_grid, cv=5,
                   return_train_score=True)
clf.fit(X, y)
print('\nbest param:{0}; best score:{1}'.format(
        clf.best_params_, clf.best_score_))

def plot_curve(train_sizes, cv_results, xlabel):
    """Plot mean train/CV scores versus a swept parameter, with std bands.

    Parameters
    ----------
    train_sizes : sequence
        X-axis values, one per grid point.
    cv_results : mapping
        ``GridSearchCV.cv_results_``; must contain the
        'mean_train_score', 'std_train_score', 'mean_test_score' and
        'std_test_score' arrays (requires return_train_score=True).
    xlabel : str
        Label for the x-axis.
    """
    train_scores_mean = cv_results['mean_train_score']
    train_scores_std = cv_results['std_train_score']
    test_scores_mean = cv_results['mean_test_score']
    test_scores_std = cv_results['std_test_score']

    plt.figure(figsize=(10, 6), dpi=80)
    # BUG FIX: corrected title typo ('parameters turning').
    plt.title('parameter tuning')
    plt.grid()
    plt.xlabel(xlabel)
    plt.ylabel('score')

    # Shade +/- one standard deviation around each mean curve.
    for mean, std, color in ((train_scores_mean, train_scores_std, 'r'),
                             (test_scores_mean, test_scores_std, 'g')):
        plt.fill_between(train_sizes, mean - std, mean + std,
                         alpha=0.1, color=color)

    plt.plot(train_sizes, train_scores_mean, '.--', color='r',
             label='Training score')
    plt.plot(train_sizes, test_scores_mean, '.-', color='g',
             label='Cross-validation score')
    plt.legend(loc='best')
    
# Visualize how the impurity threshold affects train/CV scores.
plot_curve(thresholds,clf.cv_results_,xlabel='gini thresholds')


# Search several alternative pruning strategies at once; GridSearchCV
# reports the single best-scoring combination across all sub-grids.
entropy_thresholds = np.linspace(0, 1, 50)
gini_thresholds = np.linspace(0, 0.5, 50)

# Parameter grid: each dict is an independent sub-grid.
# NOTE: min_impurity_split was deprecated in scikit-learn 0.23 and removed
# in 1.0; min_impurity_decrease is the supported replacement.
param_grid = [{'criterion': ['entropy'],
               'min_impurity_decrease': entropy_thresholds},
              {'criterion': ['gini'],
               'min_impurity_decrease': gini_thresholds},
              {'max_depth': range(2, 10)},
              {'min_samples_split': range(2, 30, 2)}]
clf = GridSearchCV(DecisionTreeClassifier(), param_grid, cv=5)
clf.fit(X, y)
print('\nbest param:{0}\nbest score:{1}'.format(clf.best_params_,
      clf.best_score_))










