# -*- coding: utf-8 -*-
# -*- author:徐赞-*-

#头文件#
import numpy as np 
import pandas as pd 
import matplotlib.pyplot as plt
import seaborn as sns 
import warnings 
warnings.filterwarnings("ignore")

from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC

from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV

# Load the train/test CSVs and stack them into one frame so the feature
# engineering below is applied consistently to both splits.
train=pd.read_csv('E:/PROGRAMING/PYTHON/KAGGLE/Titanic/all/train.csv')
test=pd.read_csv('E:/PROGRAMING/PYTHON/KAGGLE/Titanic/all/test.csv')
# sort=True orders the columns alphabetically; rows 0-890 are the train
# split, rows 891+ the test split (Survived is NaN there).
full=pd.concat([train,test],ignore_index=True,sort=True)

# Field glossary (English): PassengerId => passenger ID; Pclass => cabin
# class (1st/2nd/3rd); Name => passenger name; Sex => sex; Age => age;
# SibSp => # siblings/spouses aboard; Parch => # parents/children aboard;
# Ticket => ticket number; Fare => ticket price; Cabin => cabin number;
# Embarked => port of embarkation.
'''
PassengerId => 乘客ID 
Pclass => 客舱等级(1/2/3等舱位) 
Name => 乘客姓名 
Sex => 性别 
Age => 年龄 
SibSp => 兄弟姐妹数/配偶数 
Parch => 父母数/子女数 
Ticket => 船票编号 
Fare => 船票价格 
Cabin => 客舱号 
Embarked => 登船港口
'''

# ---- Missing-value handling ----
# Embarked: only two rows are missing; 'S' (Southampton) is the mode.
full['Embarked'].fillna('S',inplace=True)
# Fare: the single missing fare belongs to a 3rd-class passenger, so
# impute with the 3rd-class median.
full['Fare'].fillna(full[full.Pclass==3]['Fare'].median(),inplace=True)
# Cabin: too sparse to use directly — reduce to a known(1)/unknown(0) flag.
full.loc[full.Cabin.notnull(),'Cabin']=1
full.loc[full.Cabin.isnull(),'Cabin']=0
# Extract the honorific from names shaped "Surname, Title. Given names".
full['Title']=full['Name'].apply(lambda x:x.split(',')[1].split('.')[0].strip())#Braund, Mr. Owen Harris
# Collapse rare titles into Rareman/Rarewoman buckets; keep the common ones.
nn={'Capt':'Rareman', 'Col':'Rareman','Don':'Rareman','Dona':'Rarewoman',
    'Dr':'Rareman','Jonkheer':'Rareman','Lady':'Rarewoman','Major':'Rareman',
    'Master':'Master','Miss':'Miss','Mlle':'Rarewoman','Mme':'Rarewoman',
    'Mr':'Mr','Mrs':'Mrs','Ms':'Rarewoman','Rev':'Mr','Sir':'Rareman',
    'the Countess':'Rarewoman'}
full.Title=full.Title.map(nn)
# Sentinel 999 marks an originally-missing age; imputed per title below.
full['Age'].fillna(999,inplace=True)
def girl(row):
    """Return the refined title for one passenger row.

    A 'Miss' is reclassified as 'Girl' when she is demonstrably young
    (known age <= 14) or when her age is unknown (sentinel 999) but she
    travels with parents/children (Parch != 0). Everyone else keeps the
    existing Title.
    """
    if row.Title != 'Miss':
        return row.Title
    if row.Age != 999 and row.Age <= 14:
        return 'Girl'
    if row.Age == 999 and row.Parch != 0:
        return 'Girl'
    return row.Title
# Apply the Girl re-labelling, then impute the sentinel ages (999) with
# the mean age of the same title group.
full['Title']=full.apply(girl,axis=1)
Tit=['Mr','Miss','Mrs','Master','Girl','Rareman','Rarewoman']
for i in Tit:
    # Bug fix: exclude the 999 sentinels from the group mean — the
    # original averaged over them too, inflating every imputed age.
    known=(full.Title==i)&(full.Age!=999)
    full.loc[(full.Age==999)&(full.Title==i),'Age']=full.loc[known,'Age'].mean()

# ---- Feature engineering ----
# Discretize Age and Fare into five ordinal bands (edges taken from the
# original quintile analysis); band 1 is lowest, band 5 highest.
for src, dst, edges in (('Age','AgeCut',(16.136,32.102,48.068,64.034)),
                        ('Fare','FareCut',(7.854,10.5,21.558,41.579))):
    full.loc[full[src]<=edges[0],dst]=1
    for band in range(1,4):
        # (edges[band-1], edges[band]] -> band+1
        full.loc[(full[src]>edges[band-1])&(full[src]<=edges[band]),dst]=band+1
    full.loc[full[src]>edges[3],dst]=5
# TPP (Title/Pclass/Parch survival tier): rank every (Title, Pclass, Parch)
# cell by its observed survival rate on the labelled (train) rows.
Tit=['Girl','Master','Mr','Miss','Mrs','Rareman','Rarewoman']
for i in Tit:
    for j in range(1,4):
        for g in range(0,10):
            cell=(full.Title==i)&(full.Pclass==j)&(full.Parch==g)
            # Compute the cell's survival rate once (the original
            # recomputed the same mean in each elif). An empty cell
            # yields NaN, which fails every comparison -> TPP 4.
            rate=full.loc[cell&(full.Survived.notnull()),'Survived'].mean()
            if rate>=0.8:
                full.loc[cell,'TPP']=1
            elif rate>=0.5:
                full.loc[cell,'TPP']=2
            elif rate>=0:
                full.loc[cell,'TPP']=3
            else:
                full.loc[cell,'TPP']=4
# Cells with no training evidence (TPP==4) fall back to a sex/class prior.
full.loc[(full.TPP==4)&(full.Sex=='female')&(full.Pclass!=3),'TPP']=1
full.loc[(full.TPP==4)&(full.Sex=='female')&(full.Pclass==3),'TPP']=2
full.loc[(full.TPP==4)&(full.Sex=='male')&(full.Pclass!=3),'TPP']=2
full.loc[(full.TPP==4)&(full.Sex=='male')&(full.Pclass==3),'TPP']=3
# Family: 1 when travelling with any relative (SibSp+Parch > 0), else 0.
full.loc[full.Parch+full.SibSp>0,'Family']=1 
full.loc[full.Parch+full.SibSp==0,'Family']=0 
# MPPS: hand-picked passenger profiles combining Title/Pclass/Parch/SibSp;
# everything not matching the three profiles below falls into bucket 4.
full.loc[(full.Title=='Mr')&(full.Pclass==1)&(full.Parch==0)&((full.SibSp==0)|(full.SibSp==1)),'MPPS']=1
full.loc[(full.Title=='Mr')&(full.Pclass!=1)&(full.Parch==0)&(full.SibSp==0),'MPPS']=2
full.loc[(full.Title=='Miss')&(full.Pclass==3)&(full.Parch==0)&(full.SibSp==0),'MPPS']=3
full.MPPS.fillna(4,inplace=True)

# ---- Modelling: feature matrix, scaling, baseline model list ----
predictors=['Cabin','Embarked','Family','Pclass','Sex','Title','AgeCut','TPP','FareCut','Age','Fare','MPPS']
models=[KNeighborsClassifier(),LogisticRegression(),GaussianNB(),DecisionTreeClassifier(),RandomForestClassifier(),
       GradientBoostingClassifier(),SVC()]
# One-hot encode categorical predictors; rows 0-890 carry labels (train).
full_dummies=pd.get_dummies(full[predictors])
X=full_dummies[:891]
y=full.Survived[:891]
test_X=full_dummies[891:]
# Fit the scaler once on the training rows and reuse it for the test rows
# (the original called scaler.fit(X) twice — redundant work, same result).
scaler=StandardScaler().fit(X)
X_scaled=scaler.transform(X)
test_X_scaled=scaler.transform(test_X)
# Kept for reference: 5-fold CV comparison of the baseline models.
'''
names=['KNN','LR','NB','Tree','RF','GDBT','SVC']
for name,model in zip(names,models):
	score=cross_val_score(model,X_scaled,y,cv=5)
	print(name,score.mean(),score)
'''
# Kept for reference: feature-importance inspection via a random forest.
'''
model=RandomForestClassifier()
model.fit(X,y)
print(model.feature_importances_)
'''

# Model tuning (kept for reference): from the CV scores above, LR, GBDT
# and SVC performed best; the grid searches below record the parameter
# sweeps and the best settings found for each.
'''
param={'C':[0.03,0.032,0.04,0.034,0.036,0.039,0.041]}
grid_search=GridSearchCV(LogisticRegression(),param,cv=5)
grid_search.fit(X_scaled,y)
print('LR:',grid_search.best_params_,grid_search.best_score_)
#C=0.04,0.8215488215488216

param={'n_estimators':[30,40,20,35,25],'learning_rate':[0.1,0.5,1],'max_depth':[2,3,4]}
grid_search=GridSearchCV(GradientBoostingClassifier(),param,cv=5)
grid_search.fit(X_scaled,y)
print('GDBT:',grid_search.best_params_,grid_search.best_score_)
#30,1,3 

param={'C':[3.5,3,2.5,1],'gamma':[0.01,0.02,0.05,0.008]}
grid_search=GridSearchCV(SVC(),param,cv=5)
grid_search.fit(X_scaled,y)
print('SVC:',grid_search.best_params_,grid_search.best_score_)
#3,0.01
'''

# ---- Ensembling and submission ----
# Bagged GBDT (hyper-parameters near the grid-search optimum recorded
# above) plus a soft-voting GBDT/SVC/LR ensemble as a CV sanity check.
bagging=BaggingClassifier(GradientBoostingClassifier(n_estimators=30,learning_rate=1,max_depth=3),n_estimators=50)
clf2=GradientBoostingClassifier(n_estimators=120,learning_rate=0.12,max_depth=4)
clf4=SVC(C=4,gamma=0.015,probability=True)  # probability=True required for soft voting
clf6=LogisticRegression(C=0.06)
eclfW_soft=VotingClassifier(estimators=[('GDBT',clf2),('SVC',clf4),('LR',clf6)],weights=[2,1.5,1],voting='soft')

# Report 5-fold CV accuracy for both candidates; the bagged GBDT is the
# one actually used for the submission below.
models=[eclfW_soft,bagging]
names=['eclf_soft','Bagging']
for name,model in zip(names,models):
	score=cross_val_score(model,X_scaled,y,cv=5)
	print(name,score.mean())

pred=bagging.fit(X_scaled,y).predict(test_X_scaled)
# Bug fix: y is float64 (Survived held NaN before slicing), so predict()
# returns 0.0/1.0; cast to int so the submission contains 0/1 labels.
result=pd.DataFrame({'PassengerId':test.PassengerId,'Survived':pred.astype(int)})
result.to_csv('E:/PROGRAMING/PYTHON/KAGGLE/Titanic/result_final.csv',index=False)
