#!/usr/bin/env python
# coding: utf-8

# In[3]:


import pandas as pd
import numpy as np
import matplotlib.pyplot as plt


# In[4]:


# Load the Titanic training data from the working directory.
data = pd.read_csv("./train.csv")


# In[5]:


# Inspect the available columns (display-only in a notebook).
data.columns


# In[6]:


# Keep only the label (Survived) plus the columns we will model on.
keep_cols = ['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
data = data[keep_cols]


# In[7]:


data.head()


# In[8]:


data.info()


# In[9]:


# Impute missing ages with the column mean (fillna replaces NaN values).
age_mean = data['Age'].mean()
data['Age'] = data['Age'].fillna(age_mean)


# In[10]:


data.info()


# In[11]:


# Any NaN still left (Embarked in the raw Titanic data) becomes 0.
data = data.fillna(0)


# In[12]:


data.info()


# In[13]:


# Encode Sex as a 0/1 integer flag: 1 for 'male', 0 otherwise.
data['Sex'] = data['Sex'].eq('male').astype(int)


# In[14]:


data.head()


# In[15]:


# One-hot encode passenger class into P1/P2/P3 indicator columns.
for klass in (1, 2, 3):
    data['P%d' % klass] = (data['Pclass'] == klass).astype(np.int32)


# In[16]:


data.head()


# In[17]:


# The raw Pclass column is now redundant.
del data['Pclass']


# In[18]:


data.head()


# In[19]:


data.Embarked.unique()


# In[20]:


# One-hot encode the port of embarkation, then drop the source column.
for col, port in (('e1', 'S'), ('e2', 'C'), ('e3', 'Q')):
    data[col] = (data['Embarked'] == port).astype(np.int32)
del data['Embarked']


# In[21]:


data.head()


# In[22]:


data.info()


# In[23]:


data.values.dtype


# In[24]:


data.columns


# In[33]:


# Feature matrix: every engineered column except the Survived label.
feature_cols = ['Sex', 'Age', 'SibSp', 'Parch', 'Fare',
                'P1', 'P2', 'P3', 'e1', 'e2', 'e3']
data_train = data[feature_cols].values


# In[34]:


data_train


# In[27]:


# Label vector, kept as an (n, 1) column to match the rest of the notebook.
data_target = data['Survived'].values.reshape(-1, 1)


# In[28]:


data_target


# In[109]:


np.shape(data_train), np.shape(data_target)


# In[57]:


from sklearn.model_selection import train_test_split


# In[58]:


# Hold out 20% of the rows as a test set (80/20 split).
x_train, x_test, y_train, y_test = train_test_split(
    data_train, data_target, test_size=0.2)


# In[59]:


from sklearn.tree import DecisionTreeClassifier


# In[60]:


# Fit an unconstrained decision tree on the training split.
model = DecisionTreeClassifier()
model.fit(x_train, y_train)


# In[63]:


# Accuracy on the held-out test rows.
model.score(x_test, y_test)


# In[64]:


# Accuracy on the training rows (much higher -> the tree is overfitting).
model.score(x_train, y_train)


# 可以看到以上训练集的得分明显高于测试集的得分，可以得出模型已经过拟合了

# 所以我们现在通过限制决策树的最大深度（max_depth）来调参，以缓解过拟合

# In[65]:


def m_score(depth):
    """Fit a tree capped at ``depth`` and return (train_acc, test_acc)."""
    clf = DecisionTreeClassifier(max_depth=depth)
    clf.fit(x_train, y_train)
    return clf.score(x_train, y_train), clf.score(x_test, y_test)


# In[71]:


# Sweep max_depth over 2..14 and record both accuracies at each depth.
depths = range(2, 15)
scores = [m_score(d) for d in depths]


# In[76]:


scores


# In[82]:


train_s = [train for train, _ in scores]


# In[83]:


test_s = [test for _, test in scores]


# In[84]:


# Visually compare train vs. test accuracy across depths.
plt.plot(train_s)
plt.plot(test_s)


# In[85]:


def m_score(value):
    """Fit an impurity-pruned tree and return (train_acc, test_acc).

    NOTE(review): the original passed ``min_impurity_split=value``; that
    parameter was deprecated in scikit-learn 0.23 and removed in 1.0, so
    the call raises TypeError on any modern release.
    ``min_impurity_decrease`` is the supported replacement — its semantics
    differ slightly (minimum impurity *decrease* required per split rather
    than a split threshold), so the best value found may shift.
    """
    model = DecisionTreeClassifier(min_impurity_decrease=value)
    model.fit(x_train, y_train)
    train_score = model.score(x_train, y_train)
    test_score = model.score(x_test, y_test)
    return train_score, test_score


# In[87]:


# Sweep the pruning value over 50 evenly spaced points in [0, 0.5].
values = np.linspace(0, 0.5, 50)
scores = [m_score(v) for v in values]


# In[88]:


scores


# In[89]:


train_sc = [s[0] for s in scores]
test_sc = [s[1] for s in scores]


# In[91]:


# Plot train vs. test accuracy across pruning values.
plt.plot(train_sc)
plt.plot(test_sc)


# In[92]:


# Index of the pruning value with the highest test accuracy.
best_index = np.argmax(test_sc)


# In[93]:


best_index


# In[94]:


# Highest test accuracy ("dest_" is presumably a typo for "best_").
dest_score = test_sc[best_index]


# In[95]:


dest_score


# In[96]:


# The pruning value that produced that best score.
dest_value = values[best_index]


# In[97]:


dest_value


# In[98]:


dest_score, dest_value


# In[ ]:





# 为了解决随机划分的差异

# 我们需要做交叉验证

# In[99]:


from sklearn.model_selection import GridSearchCV


# In[100]:


values = np.linspace(0, 0.5, 50)
depths = range(2, 15)


# In[101]:


# NOTE(review): the original grid keyed on 'min_impurity_split', which was
# deprecated in scikit-learn 0.23 and removed in 1.0 — GridSearchCV.fit
# would fail with an invalid-parameter error on any modern release.
# 'min_impurity_decrease' is the supported replacement (slightly different
# semantics: required impurity decrease per split, not a split threshold).
param_grid = {'max_depth': depths, 'min_impurity_decrease': values}


# In[103]:


# 5-fold cross-validated grid search over every depth/value pair.
model = GridSearchCV(DecisionTreeClassifier(), param_grid, cv=5)


# In[104]:


# GridSearchCV does its own train/validation splitting, so the full data
# goes in. ravel() flattens the (n, 1) label column to the 1-D shape
# sklearn expects (avoids DataConversionWarning).
model.fit(data_train, data_target.ravel())


# In[105]:


model.best_params_  # best-scoring parameter combination


# In[107]:


model.best_score_  # best cross-validated accuracy


# In[ ]:




