# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 22:53:10 2020

@author: HP
"""

# Kaggle beginner project: Titanic survival prediction
import pandas as pd
import numpy as np
import scipy.stats as ss
from statsmodels.stats.anova import anova_lm
from statsmodels.formula.api import ols
## Helper functions
def table(x, y):
    """Build a contingency (cross-tabulation) table of two 1-D arrays.

    Parameters
    ----------
    x, y : array-like
        Equal-length 1-D arrays of category values.

    Returns
    -------
    numpy.ndarray
        Matrix of shape (unique(x), unique(y)); cell [i, j] counts the rows
        where x equals its i-th unique value and y equals its j-th.
    """
    type1 = np.unique(x)
    type2 = np.unique(y)
    result = np.zeros([len(type1), len(type2)])
    for i, a in enumerate(type1):
        for j, b in enumerate(type2):
            # Count rows where both categories match simultaneously.
            # (Replaces the original int-cast-and-sum-equals-2 trick.)
            result[i, j] = np.sum((x == a) & (y == b))
    return result


def anova(y, x, data):
    """One-way ANOVA p-value of continuous column `x` against categorical `y`.

    Parameters
    ----------
    y : str
        Name of the categorical (response/grouping) column.
    x : str
        Name of the continuous column.
    data : pandas.DataFrame
        Frame containing both columns.

    Returns
    -------
    float
        The p-value ("PR(>F)") of the factor row in the ANOVA table.
    """
    data_t = data.copy()
    # Encode the categorical column as integer codes so it can appear
    # on the right-hand side of the formula.
    data_t[y] = pd.factorize(data_t[y])[0]
    anova_reC = anova_lm(ols(x + '~' + y, data=data_t[[y, x]]).fit())
    # Bug fix: DataFrame.as_matrix() was removed in pandas 1.0 — use .iloc.
    # Column 4 of anova_lm's table is "PR(>F)" for the factor row.
    p = anova_reC.iloc[0, 4]
    return p

def ticket(x):
    """Map each raw ticket string to a coarse prefix category.

    Tickets that start with a digit all collapse to the single label
    "NUM"; every other ticket keeps its leading space-delimited token.
    """
    digits = set("0123456789")
    labels = []
    for entry in x:
        if entry[0] in digits:
            labels.append("NUM")
        else:
            labels.append(entry.split(" ")[0])
    return labels


def ticket2(x):
    """Map each raw ticket string to a short prefix code.

    Numeric tickets keep the first half of their digits (rounded up);
    non-numeric tickets keep their leading space-delimited token.

    Bug fix: the original only handled numeric tickets of length 3-7 and
    silently appended nothing for other lengths, so the returned list
    could be shorter than the input and misalign with the DataFrame rows.
    The slice j[0:(len(j)+1)//2] reproduces the original hard-coded cases
    exactly (len 3/4 -> 2 chars, 5/6 -> 3, 7 -> 4) and generalizes to any
    other length.
    """
    digits = set("0123456789")
    result = []
    for j in x:
        if j[0] in digits:
            # Keep the leading half (rounded up) of a numeric ticket.
            result.append(j[0:(len(j) + 1) // 2])
        else:
            result.append(j.split(" ")[0])
    return result
## Load the training and test sets (Kaggle Titanic CSVs in the working dir).
train=pd.read_csv("./train.csv")
test=pd.read_csv("./test.csv")
## Count missing values per column.
# NOTE(review): these two results are not assigned anywhere — they only
# display output in an interactive session and are no-ops as a script.
train.isna().sum(axis=0)
test.isna().sum(axis=0)
# Keep the test-set passenger ids for the submission file, then drop the
# column from both frames (it carries no predictive signal).
P=test.PassengerId.values
train=train.drop("PassengerId",axis=1)
test=test.drop("PassengerId",axis=1)
## Missing-value imputation and feature engineering.
# Disabled alternative: encode Cabin as a present (1) / missing (0) flag.
'''
temp=np.ones_like(train.Cabin.values)
temp[train.Cabin.isna().values]=0
train.Cabin=temp
temp=np.ones_like(test.Cabin.values)
temp[test.Cabin.isna().values]=0
test.Cabin=temp
'''
# Collapse raw ticket strings to short prefix codes (the test set is
# converted later, just before one-hot encoding).
train.Ticket=ticket2(train.Ticket.values)
# Family size and an "is travelling alone" indicator derived from it.
train['Family']=train["SibSp"]+train['Parch']
train['Is_Alone']=train['Family'].apply(lambda x:1 if x==0 else 0)
test['Family']=test["SibSp"]+test['Parch']
test['Is_Alone']=test['Family'].apply(lambda x:1 if x==0 else 0)

### Age processing: impute missing ages from the mean age of passengers
### sharing the same name title, then bucket into age bands.
data_all_2=pd.concat([train,test])
# Extract the title token from "Surname, Title. Given names" style names.
all_f_name=data_all_2.Name.apply(lambda x:(x.split(".")[1]).split(' ')[1])
all_f_name=all_f_name.str.replace('\(','',regex=True)
all_f_name=all_f_name.str.replace('\)','',regex=True)
# Mean age per title, indexed by the titles of the rows with missing Age.
all_fill_a=data_all_2["Age"].groupby(by=all_f_name).mean()[all_f_name[data_all_2["Age"].isna().values]].values.copy()
# NOTE(review): chained assignment — raises SettingWithCopyWarning and may
# not write through on some pandas versions; .loc would be safer.
data_all_2["Age"][data_all_2.Age.isna()]=all_fill_a
# Titles whose group mean is itself NaN fall back to the global mean.
data_all_2.Age=data_all_2.Age.fillna(data_all_2.Age.mean())
# Bucket Age into 5 bands (infant / child / teen / adult / senior).
age_buckets= [0,2,10,18,60,200]
age_labels = [0,1,2,3,4]
data_all_2['Age'] = pd.cut(data_all_2['Age'], bins=age_buckets, labels=age_labels, right=False)
## Parch: binarize into "no parents/children aboard" (0) vs "some" (1).
parch_buckets= [0,1,200]
parch_labels = [0,1]
data_all_2['Parch'] = pd.cut(data_all_2['Parch'], bins=parch_buckets, labels=parch_labels, right=False)

# SibSp: binarize into "no siblings/spouse aboard" (0) vs "some" (1).
sibsp_buckets= [0,1,200]
sibsp_labels = [0,1]
data_all_2['SibSp'] = pd.cut(data_all_2['SibSp'], bins=sibsp_buckets, labels=sibsp_labels, right=False)

# Fare: fill the missing value with the mean, then binarize at 23.
fare_buckets= [0,23,10000]
fare_labels = [0,1]
data_all_2['Fare'] = data_all_2['Fare'].fillna(data_all_2['Fare'].mean())
data_all_2['Fare'] = pd.cut(data_all_2['Fare'], bins=fare_buckets, labels=fare_labels, right=False)

# Ability to bargain
#data_all_2['Fare'] = data_all_2['Fare'].astype(np.int8)
data_all_2['Pclass'] = data_all_2['Pclass'].astype(np.int8)


## Build an "Ability" (spending power) feature from Fare and Pclass.
# NOTE(review): Fare was binned to categorical labels 0/1 just above, so this
# ratio uses bin labels rather than raw fares — yet ab_buckets reach 10000 as
# if raw fares were intended. Confirm whether Fare should be binned before or
# after this division, and that categorical .values divides cleanly on the
# installed pandas version.
data_all_2['Ability'] = data_all_2['Fare'].values/ data_all_2['Pclass'].values
ab_buckets= [0,4,9,15,20,59,70,10000]
ab_labels = [0,1,2,3,4,5,6]
data_all_2['Ability'] = pd.cut(data_all_2['Ability'], bins=ab_buckets, labels=ab_labels, right=False)


# Embarked and Cabin are dropped entirely.
data_all_2=data_all_2.drop(['Embarked','Cabin'],axis=1)

# Split back into train/test; the test rows carry no Survived label.
train=data_all_2.iloc[0:train.shape[0],:]
test=data_all_2.iloc[train.shape[0]:,:]
test=test.drop("Survived",axis=1)



## Name processing: keep only the title ("Mr", "Mrs", "Miss", ...).
train.Name=train.Name.apply(lambda x :x.split(",")[1]).apply(lambda x :x.split(".")[0].lstrip())## extract the middle (title) token of the name
test.Name=test.Name.apply(lambda x :x.split(",")[1]).apply(lambda x :x.split(".")[0].lstrip())





# Disabled exploratory analysis: chi-square tests for the discrete features,
# ANOVA for the continuous ones, and a pyecharts bar chart of survival by sex.
'''
###数据分析
##字符型数据进行哑变量处理
#连续性自变量和离散自变量与因变量的关系
discrete_variable=["Pclass","Cabin","Name","Sex","Is_Alone","Family","SibSp","Parch","Ticket","Ability","Age"]
continous_variable=["Age","Fare"]
y=train.Survived.values
result1=dict()
result2=dict()
for i in discrete_variable:
    x=train[i].values
    t=table(x,y)
    kf = ss.chi2_contingency(t)
    result1[i]=[kf[1]]

for j in continous_variable:
    result2[j]=[anova('Survived',j,train)]

result1=pd.DataFrame(result1)
result2=pd.DataFrame(result2)


##绘图分析绘图威力.以sex为例
#coding=utf-8

from pyecharts.charts.bar import Bar
columns = ["死亡","生存"]
data1=[]
data2=[]
for i in np.unique(train.Survived):
    temp2=train.iloc[y==i,:].copy()
    data1.append(temp2.iloc[temp2.Sex.values=="male",:].shape[0])
    data2.append(temp2.iloc[temp2.Sex.values=="female",:].shape[0])
    
    
    

bar=Bar("生存情况","")
bar.add("男性", columns, data1,is_label_show=True)
bar.add("女性", columns, data2,is_label_show=True)
bar.render("./b.html")
'''
### Model building.
## One-hot encode the features.

train2=train.copy()
train_y=train2["Survived"].values
train2=train2.drop("Survived",axis=1)
# The test set's tickets were never converted (only train's were, above).
test.Ticket=ticket2(test.Ticket.values)
data_all=pd.concat([train2,test])
# Re-index 0..n-1 so the index-based merge below aligns with get_dummies.
data_all.index=np.arange(data_all.shape[0])


##data_all=data_all.drop("Ticket",axis=1)
oh_variable2=["Pclass","Name","Sex","Age","Fare","Ability","SibSp","Parch","Ticket"]
for j in oh_variable2:
    # Columns with >2 distinct values get dummy columns; binary columns are
    # mapped in place to 1 (first unique value) / 0 (the other).
    if(len(np.unique(data_all[j]))>2):
        temp=pd.get_dummies(data_all[j],prefix=j)
        data_all=pd.merge(data_all,temp,left_index=True,right_index=True)
        data_all=data_all.drop(j,axis=1)
    else:
        type1=np.unique(data_all[j])
        data_all[j]=data_all[j].apply(lambda x:1 if x==type1[0] else 0)
    
    

# Split the encoded matrix back into train/test feature arrays.
train_x=data_all.values[0:len(train_y),:]
test_x=data_all.values[len(train_y):,:]
###构建模型
from sklearn.ensemble import RandomForestClassifier
##from xgboost import XGBClassifier
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_val_score
##clf = RandomForestClassifier(max_depth=8, random_state=0,min_samples_split=40,n_estimators=50)##max_depth=4, random_state=0,min_samples_split=10
##clf.fit(train_x,train_y)



#Bayesian optimization
def bayesian_optimization(dataset, function, parameters):
    """Maximize `function` over the search space `parameters` with bayes_opt.

    Parameters
    ----------
    dataset : sequence
        Accepted for interface compatibility but not used here: `function`
        is expected to close over the training data itself. (The original
        unpacked it into locals that were never read.)
    function : callable
        Objective taking the hyper-parameters as keyword arguments and
        returning a score to maximize.
    parameters : dict
        Mapping of parameter name -> (low, high) search bounds.

    Returns
    -------
    dict
        The best point found (`BO.max`).
    """
    n_iterations = 100
    gp_params = {"alpha": 1e-4}

    BO = BayesianOptimization(function, parameters)
    # NOTE(review): forwarding **gp_params to maximize() passed alpha to the
    # underlying Gaussian process in bayes_opt < 1.x; newer versions changed
    # this API — confirm against the installed bayes_opt version.
    BO.maximize(n_iter=n_iterations, **gp_params)

    return BO.max



def rbt_optimization(cv_splits):
    """Build the (objective, search space) pair for random-forest tuning.

    Parameters
    ----------
    cv_splits : int
        Number of cross-validation folds used by the objective.

    Returns
    -------
    tuple
        (function, parameters): the objective scores a RandomForestClassifier
        on the module-level train_x/train_y by mean ROC-AUC; parameters maps
        each hyper-parameter to its (low, high) search bounds.
    """
    def function(m_p, m_s_s, n_s):
        # Clamp to sklearn's valid minimums: max_depth >= 1,
        # min_samples_split >= 2, n_estimators >= 1. The original clamped
        # to 0, which RandomForestClassifier rejects outright.
        return cross_val_score(
               RandomForestClassifier(
               max_depth=max(1, int(m_p)), random_state=0, min_samples_split=max(2, int(m_s_s)), n_estimators=max(1, int(n_s))),
               X=train_x,
               y=train_y,
               cv=cv_splits,
               scoring="roc_auc",
               n_jobs=-1).mean()

    parameters = {"m_p": (1, 150),
                  "m_s_s": (2, 40),
                  "n_s": (10, 700)}

    return function, parameters



# Tune the random forest with Bayesian optimization (2-fold CV, ROC-AUC).
dataset=[train_x,train_y]
f,p=rbt_optimization(2)
result1=bayesian_optimization(dataset, f, p)


# Earlier best found: max_depth=147, random_state=0,min_samples_split=35,n_estimators=698
clf = RandomForestClassifier(max_depth=148, random_state=0,min_samples_split=39,n_estimators=288)##max_depth=30, random_state=0,min_samples_split=35,n=600
clf.fit(train_x,train_y)
y_p=clf.predict(test_x)
# Write the Kaggle submission file (PassengerId, Survived).
predict={}
predict['PassengerId']=P
predict['Survived']=y_p
result=pd.DataFrame(predict)
result.to_csv("./t2.csv",index=False)


##

## Plot the top-10 feature importances.
from pyecharts.charts.bar import Bar
# Bug fix: the original sorted the importances descending and then argsorted
# the already-sorted array, which always yields indices 0..9 — so the chart
# paired the top importances with the FIRST ten columns of data_all instead
# of the columns they actually belong to. Argsort the raw importances once
# and use that order for both columns and values.
order = np.argsort(-clf.feature_importances_)[0:10]
columns = data_all.columns[order]
importance = np.round(clf.feature_importances_[order], 3)
# top 10
bar = Bar("变量重要性程度", "")
bar.add("", columns, importance, is_label_show=True, xaxis_rotate=-45)
bar.render()
