#!/usr/bin/env python3
#
# Titanic Kaggle case study with scikit-learn
#
# Feature-engineering section
#

import pandas
from sklearn.ensemble import GradientBoostingClassifier #机器学习库-迭代决策树
from sklearn.ensemble import RandomForestClassifier     #机器学习库-随机森林
from sklearn.linear_model import LinearRegression       #机器学习库-线性回归
from sklearn.linear_model import LogisticRegression     #机器学习库-逻辑回归
from sklearn.cross_validation import KFold              #机器学习库-交叉验证
from sklearn import cross_validation   #<---这部分已经在2.0中被废弃掉，使用model_selection 代替cross_validation
import numpy as np
import re
from sklearn.feature_selection import SelectKBest,f_classif
import matplotlib.pyplot as plt

# Load the training data.
titanic = pandas.read_csv("train.csv")

# Fill missing ages with the median of the observed ages.
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())

# Encode the Sex column as integer codes via in-place .loc assignment
# (this keeps the column's original object dtype).
for sex_value, sex_code in (("male", 0), ("female", 1)):
	titanic.loc[titanic["Sex"] == sex_value, "Sex"] = sex_code

# Missing embarkation ports default to 'S', then encode as integers.
titanic["Embarked"] = titanic["Embarked"].fillna('S')
for port, port_code in (("S", 0), ("C", 1), ("Q", 2)):
	titanic.loc[titanic["Embarked"] == port, "Embarked"] = port_code

# Engineered features:
# - familysize: number of relatives aboard (siblings/spouses + parents/children)
titanic["familysize"] = titanic["SibSp"] + titanic["Parch"]
# - NameLength: length of the passenger's full name string
titanic["NameLength"] = titanic["Name"].apply(len)

def get_title(name):
	"""Extract the honorific title (e.g. "Mr", "Mrs") from a passenger name.

	Titles appear as a space-prefixed word followed by a period,
	e.g. "Braund, Mr. Owen Harris" -> "Mr".

	Returns the title without its trailing period, or "" when no
	title-shaped token is present.
	"""
	# Raw string: the original ' ([A-Za-z]+)\.' was a non-raw literal, whose
	# "\." is an invalid escape sequence (SyntaxWarning on modern Python).
	title_search = re.search(r' ([A-Za-z]+)\.', name)
	if title_search:
		return title_search.group(1)
	return ""
# Pull each passenger's title out of the Name column.
extracted_titles = titanic["Name"].apply(get_title)
# Inspect the distribution of raw titles:
# print(pandas.value_counts(extracted_titles))

# Map every known title string to a distinct integer code.
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Dr': 5,
                 'Rev': 6, 'Major': 7, 'Col': 8, 'Mlle': 9, 'Capt': 10,
                 'Sir': 11, 'Mme': 12, 'Don': 13, 'Ms': 14, 'Jonkheer': 15,
                 'Countess': 16, 'Lady': 17}

# Element-wise replacement: titles absent from the mapping are left as-is
# (same behavior as the original loop).
for title_name, title_code in title_mapping.items():
	extracted_titles[extracted_titles == title_name] = title_code

# Verify the encoded result:
# print(pandas.value_counts(extracted_titles))

titanic["Title"] = extracted_titles

# Feature columns the models train on.
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked",
              "familysize", "Title", "NameLength"]

'''
This disabled section inspects the statistical importance of each feature

selector =SelectKBest(f_classif,k = 5)
selector.fit(titanic[predictors],titanic["Survived"])
scores = -np.log10(selector.pvalues_)
#print(scores)

#使用matplotlib通过可视化查看重要性
plt.bar(range(len(predictors)),scores)
plt.xticks(range(len(predictors)),predictors,rotation='vertical')
plt.show()

'''
# Ensemble: blend a gradient-boosting model and a logistic regression by a
# weighted average of their predicted probabilities, evaluated with 4-fold
# cross-validation. Each entry pairs an estimator with its feature columns.
algorithms = [
	[GradientBoostingClassifier(random_state = 1 , n_estimators = 25 , max_depth = 3),predictors],
	[LogisticRegression(random_state = 1),predictors]
]

# sklearn.cross_validation was removed in scikit-learn 0.20; prefer the
# model_selection API and fall back to the legacy one when it is unavailable.
# Without shuffle=True both APIs yield the same sequential 4-fold splits
# (random_state has no effect here), so behavior is unchanged.
try:
	from sklearn.model_selection import KFold as _ModernKFold
	fold_iter = _ModernKFold(n_splits=4).split(titanic)
except ImportError:
	fold_iter = KFold(titanic.shape[0], n_folds=4, random_state=1)

predictions = []

for train, test in fold_iter:
	train_target = titanic["Survived"].iloc[train]
	full_test_predictions = []
	# Loop variable renamed from "predictors" (original code shadowed the
	# module-level feature list defined above).
	for alg, feature_cols in algorithms:
		alg.fit(titanic[feature_cols].iloc[train, :], train_target)
		# Probability of class 1 ("survived") on the held-out fold.
		test_predictions = alg.predict_proba(
			titanic[feature_cols].iloc[test, :].astype(float))[:, 1]
		full_test_predictions.append(test_predictions)
	# Weighted blend of the two models' probabilities.
	# NOTE(review): the weights 2 and 5 sum to 7, not the divisor 8, so the
	# blended probabilities are shrunk toward 0 — confirm this is intended.
	test_predictions = (2*full_test_predictions[0]+5*full_test_predictions[1])/8
	# Threshold at 0.5 to obtain hard 0/1 survival predictions.
	test_predictions[test_predictions <= .5] = 0
	test_predictions[test_predictions > .5] = 1
	predictions.append(test_predictions)

# Stitch the per-fold predictions back together (folds are sequential, so
# the concatenation is aligned with the row order of `titanic`) and score.
predictions = np.concatenate(predictions, axis=0)
accuracy = (predictions == titanic["Survived"]).mean()
print('准确度:', accuracy)