#!/usr/bin/env python
# coding: utf-8

# In[1]:


# Import the required libraries
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression   # 导入线性回归
from sklearn.model_selection import KFold  # 导入交叉验证
from sklearn.linear_model import LogisticRegression     # 导入逻辑验证
from sklearn.ensemble import RandomForestClassifier     # 导入随机森林
from sklearn.feature_selection import SelectKBest, f_classif    # 选择最佳特征
import re
import warnings
warnings.filterwarnings('ignore')


# In[2]:


# Load the Kaggle Titanic training and test sets (CSV files expected
# in the current working directory).
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")


# In[3]:


train.head()


# In[4]:


test.head()


# In[5]:


train.describe()


# In[6]:


train.info()  


# In[7]:


test.info()


# In[8]:


train.describe(include=['O']) #参数O代表只列出object类型的列


# In[9]:


train['Survived'].value_counts()


# In[10]:


# Survival rate by sex (bar height = mean of 'Survived').
sns.barplot(x="Sex", y="Survived", data=train)


# In[11]:


# Survival rate by passenger class.
sns.barplot(x="Pclass", y="Survived", data=train)


# In[12]:


# Survival rate by number of siblings/spouses aboard.
sns.barplot(x="SibSp", y="Survived", data=train)


# In[13]:


# Survival rate by number of parents/children aboard.
sns.barplot(x="Parch", y="Survived", data=train)


# In[14]:


# Age distribution (KDE) split by survival outcome.
facet = sns.FacetGrid(train, hue="Survived", aspect=2)
# `shade=` was deprecated in seaborn 0.12 and later removed; `fill=` is the
# supported spelling and draws the same filled density curves.
facet.map(sns.kdeplot, 'Age', fill=True)
facet.set(xlim=(0, train['Age'].max()))
facet.add_legend()
plt.xlabel('Age')
plt.ylabel('density')


# In[15]:


# Passenger counts per embarkation port, split by survival.
# seaborn >= 0.12 requires the data variable to be passed by keyword,
# so use x='Embarked' instead of a positional argument.
sns.countplot(x='Embarked', hue='Survived', data=train)


# In[16]:


# Impute missing ages with the median age (robust to outliers).
age_median = train['Age'].median()
train['Age'] = train['Age'].fillna(age_median)
train.info()  # confirm 'Age' no longer has nulls


# In[17]:


# Replace the string labels in 'Sex' with numeric codes so the models
# below can consume the column: male -> 0, female -> 1.
for code, label in enumerate(("male", "female")):
    train.loc[train["Sex"] == label, "Sex"] = code


# In[18]:


# 'S' (Southampton) is by far the most common port, so use it to fill
# the missing values, then encode each port as an integer code.
train["Embarked"] = train["Embarked"].fillna("S")
for code, port in enumerate(("S", "C", "Q")):
    train.loc[train["Embarked"] == port, "Embarked"] = code
train.info()


# In[19]:


# Baseline model: linear regression scored with K-fold cross-validation.
# Features that were cleaned/encoded in the cells above.
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]

# Split the samples into 11 consecutive folds.
# NOTE: sklearn >= 0.24 raises a ValueError when random_state is set while
# shuffle is False — and shuffling must stay OFF here, because the per-fold
# predictions are concatenated in order and compared row-by-row against
# train["Survived"] below; that alignment only holds for consecutive folds.
kf = KFold(n_splits=11)

alg = LinearRegression()
predictions = []
for train_idx, test_idx in kf.split(train):
    # Feature rows used to fit this fold.
    train_predictors = train[predictors].iloc[train_idx, :]
    # Target values ('Survived') for the same rows.
    train_target = train["Survived"].iloc[train_idx]
    alg.fit(train_predictors, train_target)
    # Predict on the held-out fold.
    test_predictions = alg.predict(train[predictors].iloc[test_idx, :])
    predictions.append(test_predictions)

# Linear regression outputs continuous scores; threshold at 0.5 to get 0/1.
predictions = np.concatenate(predictions, axis=0)
predictions[predictions > .5] = 1
predictions[predictions <= .5] = 0

# Fraction of rows where the cross-validated prediction matches the label.
accuracy = sum(predictions == train["Survived"]) / len(predictions)
print(accuracy)


# In[20]:


from sklearn.model_selection import cross_val_score

# Logistic regression baseline, scored with 11-fold cross-validation.
alg = LogisticRegression(random_state=1)
scores = cross_val_score(alg, train[predictors], train["Survived"], cv=11)
# Mean accuracy across the 11 folds.
print(np.mean(scores))


# In[21]:


# Random forest scored with 11-fold cross-validation.
alg = RandomForestClassifier(random_state=10, warm_start=True,
                             n_estimators=26,
                             max_depth=6,
                             max_features='sqrt')
# sklearn >= 0.24 raises a ValueError if random_state is set while
# shuffle=False; shuffling is safe here because cross_val_score scores
# each fold independently (no positional alignment is needed).
kf = KFold(n_splits=11, shuffle=True, random_state=1)
scores = cross_val_score(alg, train[predictors], train["Survived"], cv=kf)
print(scores.mean())


# In[22]:


# Extract the honorific title from each name ("Braund, Mr. Owen" -> "Mr")
# and inspect survival by title.
train['Title'] = (train['Name']
                  .str.split(',').str[1]
                  .str.split('.').str[0]
                  .str.strip())
plt.figure(figsize=(20, 8))  # widen the figure so every title label fits
sns.barplot(x="Title", y="Survived", data=train)


# In[23]:


Title_Dict = {}
Title_Dict.update(dict.fromkeys(['Capt', 'Col', 'Major', 'Dr', 'Rev'], '4'))
Title_Dict.update(dict.fromkeys(['Don', 'Sir', 'the Countess', 'Dona', 'Lady'], '1'))
Title_Dict.update(dict.fromkeys(['Mme', 'Ms', 'Mrs'], '0'))
Title_Dict.update(dict.fromkeys(['Mlle', 'Miss'], '2'))
Title_Dict.update(dict.fromkeys(['Mr'], '5'))
Title_Dict.update(dict.fromkeys(['Master','Jonkheer'], '3'))
 
train['Title'] = train['Title'].map(Title_Dict)
sns.barplot(x="Title", y="Survived", data=train)


# In[24]:


# Family size = siblings/spouses + parents/children + the passenger.
train['FamilySize']=train['SibSp']+train['Parch']+1
sns.barplot(x="FamilySize", y="Survived", data=train)


# In[25]:


def Fam_label(s):
    """Bucket a family size into an ordinal survival-likelihood label.

    Mid-sized families (2-4) survived most often -> 2; singletons and
    large families (5-7) -> 1; very large families (>7) -> 0.
    Returns None for inputs below 1 (FamilySize is always >= 1 here).
    """
    # Chained comparisons / boolean `and`/`or` replace the original
    # bitwise `&`/`|` on booleans — same results, idiomatic Python.
    if 2 <= s <= 4:
        return 2
    elif 4 < s <= 7 or s == 1:
        return 1
    elif s > 7:
        return 0
# Map each family size onto its ordinal label and check survival by label.
train['FamilyLabel'] = train['FamilySize'].map(Fam_label)
sns.barplot(x="FamilyLabel", y="Survived", data=train)

# The raw FamilySize column is no longer needed once the label exists.
train = train.drop(columns=['FamilySize'])


# In[26]:


# Cabin is mostly missing; mark unknown cabins explicitly, then take the
# first character of the cabin string as the deck letter.
train['Cabin'] = train['Cabin'].fillna('Unknown')
train['Deck'] = train['Cabin'].str[0]
sns.barplot(x="Deck", y="Survived", data=train)


# In[27]:


def Deck_Group(s):
    """Map a deck letter to an ordinal group by observed survival rate.

    Decks B/D/E had the highest survival -> 3; A/C/F/G -> 2;
    'U' (unknown cabin) -> 1; anything else -> 0.
    """
    # Membership tests replace the original chains of bitwise `|` on
    # boolean comparisons — same results, clearer intent.
    if s in ('B', 'D', 'E'):
        return 3
    elif s in ('A', 'C', 'F', 'G'):
        return 2
    elif s == 'U':
        return 1
    else:
        return 0

# Apply the deck grouping and look at survival per group.
train['Deck_Group'] = train['Deck'].apply(Deck_Group)
sns.barplot(x='Deck_Group', y='Survived', data=train)


# In[28]:


# Count how many passengers share each ticket number (a group-travel
# signal), then attach that size to every row.
Ticket_Count = dict(train['Ticket'].value_counts())
# Series.map with a dict does the same lookup as apply(lambda x: d[x]).
train['TicketGroup'] = train['Ticket'].map(Ticket_Count)
sns.barplot(x='TicketGroup', y='Survived', data=train)


# In[29]:


def Ticket_Label(s):
    """Bucket a ticket-sharing group size into an ordinal label.

    Groups of 2-4 survived most often -> 2; singletons and groups of
    5-8 -> 1; groups larger than 8 -> 0.
    Returns None for inputs below 1 (group sizes here are always >= 1).
    """
    # Chained comparisons / `and`/`or` replace the original bitwise
    # `&`/`|` on booleans — same results, idiomatic Python.
    if 2 <= s <= 4:
        return 2
    elif 4 < s <= 8 or s == 1:
        return 1
    elif s > 8:
        return 0

# Replace the raw group size with its ordinal label.
train['TicketGroup'] = train['TicketGroup'].apply(Ticket_Label)
sns.barplot(x='TicketGroup', y='Survived', data=train)


# In[30]:


# Inspect the frame after all feature engineering.
train.head()


# In[62]:


# Univariate feature scoring with the ANOVA F-test (f_classif). The bar
# heights are -log10(p-value), so taller bars = more informative features.
predictors = ["Pclass", "Sex", "Age", "SibSp", "Fare", "Parch", "Embarked",
              "FamilyLabel", "TicketGroup", "Title", "Deck_Group"]
selector = SelectKBest(f_classif, k=5)
selector.fit(train[predictors], train["Survived"])

scores = -np.log10(selector.pvalues_)

# Plot one bar per feature with its name on the x-axis.
positions = range(len(predictors))
plt.bar(positions, scores)
plt.xticks(positions, predictors, rotation='vertical')
plt.show()


# In[61]:


# Random forest over the full engineered feature set, 9-fold CV.
predictors = ["Pclass", "Sex", "Age", "SibSp", "Fare", "Parch", "Embarked",
              "FamilyLabel", "TicketGroup", "Title", "Deck_Group"]

alg = RandomForestClassifier(random_state=10, warm_start=True,
                             n_estimators=26,
                             max_depth=6,
                             max_features='sqrt')

# sklearn >= 0.24 raises a ValueError if random_state is set while
# shuffle=False; shuffling is safe here because cross_val_score scores
# each fold independently (no positional alignment is needed).
kf = KFold(n_splits=9, shuffle=True, random_state=1)

scores = cross_val_score(alg, train[predictors], train["Survived"], cv=kf)
print(scores.mean())


# In[60]:


# Ensemble: average the predicted probabilities of a random forest and a
# logistic regression, then threshold at 0.5. Both models use the same
# engineered feature set.
feature_set = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked',
               'FamilyLabel', 'TicketGroup', 'Title', 'Deck_Group']
algorithms = [
    [RandomForestClassifier(random_state=10, warm_start=True,
                            n_estimators=26,
                            max_depth=6,
                            max_features='sqrt'),
     feature_set],
    [LogisticRegression(random_state=1),
     feature_set]
]

# NOTE: sklearn >= 0.24 raises a ValueError when random_state is set while
# shuffle is False — and shuffling must stay OFF here, because the per-fold
# predictions are concatenated in order and compared row-by-row against
# train['Survived'] below; that alignment only holds for consecutive folds.
kf = KFold(n_splits=9)
predictions = []
for train_idx, test_idx in kf.split(train):
    train_target = train['Survived'].iloc[train_idx]
    full_test_predictions = []
    # `model_predictors` avoids shadowing the module-level `predictors`.
    for alg, model_predictors in algorithms:
        alg.fit(train[model_predictors].iloc[train_idx, :], train_target)
        # Probability of class 1 ('survived') on the held-out fold.
        test_prediction = alg.predict_proba(
            train[model_predictors].iloc[test_idx, :].astype(float))[:, 1]
        full_test_predictions.append(test_prediction)
    # Average the two models' probabilities, then threshold at 0.5.
    test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2
    test_predictions[test_predictions > .5] = 1
    test_predictions[test_predictions <= .5] = 0
    predictions.append(test_predictions)

predictions = np.concatenate(predictions, axis=0)
# Fraction of rows where the ensembled CV prediction matches the label.
accuracy = sum(predictions == train['Survived']) / len(predictions)
print(accuracy)


# In[ ]:




