#!/usr/bin/env python3
#
#sklearn 中泰坦尼克号 kaggle 案例 
#
# 随机分离 
#

import pandas
from sklearn.ensemble import RandomForestClassifier
# sklearn.cross_validation was removed in scikit-learn 0.20;
# KFold and cross_val_score now live in sklearn.model_selection.
from sklearn.model_selection import KFold, cross_val_score
import numpy as np

titanic = pandas.read_csv("train.csv")

# Fill missing Age values with the median of the observed ages.
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())

# Encode categorical string columns as integers so the model can consume them.
titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1
# 'S' (Southampton) is the most common embarkation port; use it for missing values.
titanic["Embarked"] = titanic["Embarked"].fillna('S')
titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0
titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1
titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2

# Feature columns used to predict the "Survived" target.
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]

# Random-forest hyperparameters:
#   n_estimators      - number of trees in the forest (key knob)
#   min_samples_split - minimum samples required to split an internal node
#   min_samples_leaf  - minimum samples required at a leaf node
alg = RandomForestClassifier(
    random_state=1,
    n_estimators=50,
    min_samples_split=4,
    min_samples_leaf=2,
)

# Modern KFold API: takes n_splits (not the sample count + n_folds).
# shuffle=True is required for random_state to have any effect, matching
# the script's stated intent of a random split.
kf = KFold(n_splits=3, shuffle=True, random_state=1)

# 3-fold cross-validated accuracy of the random forest.
scores = cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)

print("预测：", scores.mean())