import keras.initializers
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import metrics

from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras import utils as np_utils

# Load the Titanic dataset; the target column is 'Survived'.
features = pd.read_csv("titanic_dataset.csv")
y_train = features['Survived']
x_train = features.drop('Survived',axis=1)

print(x_train.head(5))
print("x_train_shape={},y_train-shape={}".format(x_train.shape,y_train.shape))
print(x_train.isnull().sum())

# Missing-data handling: replacing missing ages with the median keeps the
# distribution roughly normal (compare the displots before/after).
sns.displot(x_train['Age'].dropna())
#plt.show()

x_train['Age'] = x_train['Age'].replace(np.nan,np.nanmedian(x_train['Age']))
sns.displot(x_train['Age'].dropna())
#plt.show()

# 'Cabin' has too many missing values to be useful; drop it outright.
x_train.drop('Cabin',axis=1,inplace=True)

sns.countplot(x='Embarked', data=x_train)
#plt.show()

# Fill missing 'Embarked' with the most frequent value ('S', per the countplot).
x_train['Embarked'] = x_train['Embarked'].replace(np.nan, 'S')

# Only one row is missing 'Fare'; print it first to inspect it.
print(x_train[np.isnan(x_train['Fare'])])

# The missing row has Pclass==3 and Embarked=='S', so impute with the median
# fare of that passenger group (NaNs zeroed so np.median is well-defined).
plass3_fares = x_train.query('Pclass==3&Embarked=="S"')['Fare']
plass3_fares = plass3_fares.replace(np.nan,0)
median_fare = np.median(plass3_fares)
x_train.loc[x_train['PassengerId']==1044,'Fare'] = median_fare

# Encode 'Sex' numerically: male -> 1, female -> 0.
x_train['Sex'] = x_train['Sex'].replace(['male','female'],['1','0']).astype(int)

print(x_train.isnull().sum())
print(x_train.head(5))

# One-hot encode the remaining categorical columns into 0/1 dummy columns
# (1 marks the row's own category, 0 all others).
x_train = pd.get_dummies(x_train)

print("x_train_shape={},y_train-shape={}".format(x_train.shape,y_train.shape))

# Hold out 20% of the data as the test split (fixed seed for reproducibility).
train_x,test_x,train_y,test_y = train_test_split(x_train,y_train,test_size=0.2,random_state=42,shuffle=True)
print("train_X.shape={},train_y.shape={}".format(train_x.shape, train_y.shape))
print("test_X.shape={},test_y.shape={}".format(test_x.shape, test_y.shape))


def TrainByModel(model):
    """Fit *model* on the module-level split and report accuracy and AUC.

    Relies on the globals ``train_x``/``train_y``/``test_x``/``test_y``
    produced by the preprocessing section above; *model* must implement the
    sklearn estimator API (``fit``/``predict``/``predict_proba``).

    Returns:
        (fpr, tpr): ROC-curve false/true positive rates on the held-out
        test set, suitable for plotting.
    """
    model.fit(train_x, train_y)

    # Predict on both splits so over/under-fitting is visible.
    train_pred = model.predict(train_x)
    test_pred = model.predict(test_x)

    train_accuracy = accuracy_score(train_y, train_pred)
    test_accuracy = accuracy_score(test_y, test_pred)

    print('The training accuracy is {}.'.format(train_accuracy))
    print('The test accuracy is {}'.format(test_accuracy))

    # ROC curve / AUC on the positive-class probability (column 1).
    y_score_dt = model.predict_proba(test_x)
    fpr, tpr, thresholds = metrics.roc_curve(test_y, y_score_dt[:, 1])
    # Bug fix: the message was hard-coded to "Decision Tree Classifier"
    # even though any estimator can be passed in; report the actual class.
    print('{} AUC is: {:.3f}'.format(type(model).__name__, metrics.roc_auc_score(test_y, y_score_dt[:, 1])))

    return fpr,tpr

# #Decision-tree model
# dt_f,dt_t = TrainByModel(DecisionTreeClassifier())
# #Logistic-regression model
# lr_f,lr_t = TrainByModel(LogisticRegression(max_iter=100000))
# #Gradient-boosting classifier model
# gb_f,gb_t = TrainByModel(GradientBoostingClassifier(n_estimators=500))
# #Neural-network (MLP) model
# mc_f,mc_t = TrainByModel(MLPClassifier(hidden_layer_sizes=128, batch_size=64,max_iter=1000,solver='adam'))
#
# fig = plt.figure(figsize=(20,10))
# ax = fig.add_subplot(111)
#
# ax1 = ax.plot(dt_f,dt_t,c='c',lw=2)
# ax2 = ax.plot(lr_f,lr_t,c='y',lw=2)
# ax3 = ax.plot(gb_f,gb_t,c='r',lw=2)
# ax4 = ax.plot(mc_f,mc_t,c='b',lw=2)
# ax.grid()
# lns = ax1 + ax2 + ax3 + ax4
# ax.legend(lns,loc=0,labels=["DecisionTreeClassifier", "LogisticRegression","GradientBoostingClassifier","MLPClassifier"])
# plt.show()


def createKerasModle(x, y):
    """Build and train a small feed-forward Keras classifier.

    Args:
        x: training features (DataFrame; values cast to float64 for fit).
        y: integer class labels (0/1); one-hot encoded before training.

    Returns:
        The trained Sequential model (2-unit softmax output).
    """
    model = Sequential()
    # Truncated-normal initializer for the first layer's weights.
    initializers = keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None)
    # Input layer: x.shape[1] features -> 128 units.
    model.add(Dense(input_dim=x.shape[1], units=128, kernel_initializer=initializers, bias_initializer='zeros'))
    # ReLU activation.
    model.add(Activation('relu'))
    # Dropout for regularization.
    model.add(Dropout(0.2))
    # Fully-connected hidden layer.
    model.add(Dense(32))
    model.add(Activation('relu'))
    # Two output units, one per class (labels are one-hot encoded below).
    model.add(Dense(2))
    # Bug fix: with a 2-unit one-hot output the correct pairing is
    # softmax + categorical_crossentropy. The original used
    # sigmoid + binary_crossentropy, which treats the two units as
    # independent labels rather than mutually exclusive classes.
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # One-hot encode the labels to match the 2-unit output layer.
    y_train_categorical = np_utils.to_categorical(y)
    # Train for 150 epochs; verbose=1 logs progress each epoch.
    model.fit(x.values.astype('float64'), y_train_categorical, epochs=150, verbose=1)
    return model

# Train the Keras model and evaluate it on the held-out test split.
keras_model = createKerasModle(train_x, train_y)
y_test_categorical = np_utils.to_categorical(test_y)
loss_and_accuracy = keras_model.evaluate(test_x.values.astype('float64'), y_test_categorical)
print("Loss={},Accuracy={}.".format(loss_and_accuracy[0], loss_and_accuracy[1]))

predictions = keras_model.predict(test_x.values.astype("float64"))
# Collapse the per-class probabilities to the most likely class per sample.
predictions = np.argmax(predictions, axis=1)

# Bug fix: the submission column must be named "PassengerId" to match the
# dataset's own column (and the Kaggle Titanic submission format); the
# original wrote "PassengerID".
submission = pd.DataFrame({
    "PassengerId": test_x["PassengerId"],
    "Survived": predictions
})

print(submission[0:15])