#coding=utf-8
'''
用决策树分类器（DecisionTreeClassifier）预测 kaggle 上的泰坦尼克号数据集。
（注：本脚本使用的是分类树而非回归树。）
'''
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
import matplotlib
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV,train_test_split,cross_val_score

pd.set_option('display.width',200) # widen console output so rows are not wrapped
pd.set_option('display.max_columns',200) # show every column when printing a DataFrame
matplotlib.use('TkAgg')  # select an interactive GUI backend for plt.show()

# Load the Titanic dataset and preprocess it for the classifier.
data = pd.read_csv(r"E:\python_project\sklearn_learn\DT\DecisionTreeDT\DecisionTreeClassifierDT\Titanicdata.csv")
# print(data.info()) # quick overview of the features
# print(data.head(10)) # peek at the first n rows


# Drop columns that carry no usable signal for this model
data.drop(['Cabin','Name','Ticket'],inplace=True,axis=1)

# Fill missing ages with the column mean. Plain assignment is used instead of
# data["Age"].fillna(..., inplace=True): that form is chained assignment, which
# raises a FutureWarning on pandas 2.x and stops working under Copy-on-Write.
data["Age"] = data["Age"].fillna(data['Age'].mean())
data.dropna(inplace=True)  # drop the few remaining rows with missing values

# Encode the text columns as integer category codes
temp = data["Embarked"].unique().tolist()  # category -> index mapping
data["Embarked"] = data["Embarked"].apply(lambda x: temp.index(x))
data.loc[:,'Sex'] = (data["Sex"] == "male").astype("int")  # male=1, female=0


## Split into train / test sets
x = data.iloc[:,data.columns != "Survived"]  # feature matrix
y = data.iloc[:,data.columns == "Survived"]  # target: survival flag

# random_state makes the split reproducible, consistent with the
# random_state=25 used for every estimator below (without it, the scores
# recorded in the comments cannot be reproduced between runs).
Xtrain , Xtest ,Ytrain ,Ytest = train_test_split(x,y,test_size=0.3,random_state=25)

# Reset the shuffled row indices back to 0..n-1
for frame in (Xtrain, Xtest, Ytrain, Ytest):
    frame.index = range(frame.shape[0])

# Fit a baseline decision tree and report accuracy on the held-out set
clf = DecisionTreeClassifier(random_state=25).fit(Xtrain, Ytrain)
score = clf.score(Xtest, Ytest)
print(score)  # baseline test accuracy (~0.71 on one split)


# Check for overfitting: compare training accuracy against 10-fold CV
# accuracy as max_depth grows from 1 to 10.
tr = []  # training-set accuracy per depth
te = []  # cross-validated accuracy per depth
# A one-column DataFrame target triggers sklearn's DataConversionWarning;
# ravel it to a 1-D array once, outside the loop.
y_flat = y.values.ravel()
for depth in range(1, 11):
    clf = DecisionTreeClassifier(random_state=25
                                 ,max_depth=depth
                                 ,criterion="entropy"
                                 )
    clf = clf.fit(Xtrain,Ytrain)
    tr.append(clf.score(Xtrain,Ytrain))
    te.append(cross_val_score(clf,x,y_flat,cv=10).mean())
print(max(te))  # best CV accuracy over the depth sweep (~0.82)
plt.plot(range(1,11),tr,color="red",label="train")
plt.plot(range(1,11),te,color="blue",label="test")
plt.xticks(range(1,11))
plt.legend()
plt.show()


# Grid search over tree hyperparameters with 10-fold cross-validation.
# (The previously defined gini/entropy threshold arrays were never used
# and have been removed; the impurity range is built inline below.)
parameters = {"criterion":("gini","entropy")
              ,"splitter":("best","random")
              ,"max_depth":[*range(1,10)]
              ,"min_samples_leaf":[*range(1,50,5)]
              # minimum information gain required before a node is split
              ,"min_impurity_decrease":[*np.linspace(0,0.5,20)]
              }
clf = DecisionTreeClassifier(random_state=25)
GS = GridSearchCV(clf,parameters ,cv=10)
GS = GS.fit(Xtrain,Ytrain)

print(GS.best_params_) # e.g. {'criterion': 'entropy', 'max_depth': 3, 'min_impurity_decrease': 0.0, 'min_samples_leaf': 1, 'splitter': 'best'}
print(GS.best_score_)  # best mean CV accuracy found by the search