import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.keras import models, layers
import json

# Paths to the training and test CSV files; adjust these to match
# where your data files actually live.
TRAIN_DATA_PATH = './train.csv'
TEST_DATA_PATH = './test.csv'
# Read the data with pandas; each call returns a DataFrame (tabular data).
dftrain_raw = pd.read_csv(TRAIN_DATA_PATH)
dftest_raw = pd.read_csv(TEST_DATA_PATH)


def dataPreProcess(dfdata_in):
    """Clean the raw Titanic data and return a numeric feature DataFrame.

    - Fills missing 'Age' / 'Fare' values with the column mean.
    - One-hot encodes the 'Embarked' and 'Sex' categorical columns
      into '<column>_<category>' indicator columns.
    - Carries 'Pclass', 'SibSp' and 'Parch' through unchanged.
    """
    processed = pd.DataFrame()

    # Numeric features: impute missing entries with the column mean.
    for numeric_col in ('Age', 'Fare'):
        processed[numeric_col] = dfdata_in[numeric_col].fillna(
            dfdata_in[numeric_col].mean())

    # Categorical features: expand each into one-hot indicator columns.
    # get_dummies' prefix argument yields the same '<col>_<value>' names
    # as renaming the columns by hand.
    for categorical_col in ('Embarked', 'Sex'):
        onehot = pd.get_dummies(dfdata_in[categorical_col],
                                prefix=categorical_col)
        processed = pd.concat([processed, onehot], axis=1)

    # Pass-through features kept exactly as in the input.
    passthrough_cols = ['Pclass', 'SibSp', 'Parch']
    return pd.concat([processed, dfdata_in[passthrough_cols].copy()], axis=1)


# step1. Data preprocessing
dftrain_process = dataPreProcess(dftrain_raw)
dftest_process = dataPreProcess(dftest_raw)

# step2. Model definition
# Derive the input width from the preprocessed features instead of
# hard-coding 10, so the network stays in sync if the feature set changes.
n_features = dftrain_process.shape[1]
model = models.Sequential()
model.add(layers.Dense(15, activation='relu', input_shape=(n_features,)))
model.add(layers.Dense(15, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()

# step3. Model compilation
# binary_crossentropy is the appropriate loss for a sigmoid binary
# classifier; mean squared error gives weak gradients for classification.
model.compile(loss='binary_crossentropy',
              optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),
              metrics=['accuracy'])

# step4. Model training
# Target labels: whether each passenger in the training set survived.
dftrain_survived = dftrain_raw['Survived']
history = model.fit(x=dftrain_process,      # features
                    y=dftrain_survived,     # labels
                    batch_size=32,          # samples per gradient update
                    epochs=50,              # full passes over the data
                    validation_split=0.2)   # hold out 20% for validation

# step5. Predict on the test set
# The trained model outputs a survival probability for each passenger.
predict_resulit = model.predict(dftest_process)
# Binarize: any probability above 0.5 is mapped to 1, everything else to 0.
predict_resulit = np.where(predict_resulit > 0.5, 1, 0)
pd_resulit = pd.DataFrame()
# predict_resulit is a [418, 1] column vector; flatten it to 1-D
# before storing it as a DataFrame column.
pd_resulit['Survived'] = predict_resulit.ravel()
pd_resulit['PassengerId'] = dftest_raw['PassengerId']
# Load the reference labels for the test set.
pd_groundtrue = pd.read_csv('./ground_truth.csv')
# Accuracy: mean of element-wise matches between prediction and ground truth.
print(np.mean(np.equal(pd_resulit['Survived'].to_numpy(),
                       pd_groundtrue['Survived'].to_numpy())))

# step6. Persist the trained model and the training history
model.save('./titanic_model.h5')
# Use a context manager so the file handle is always closed
# (the original open(...) passed to json.dump was never closed).
with open('history.json', 'w') as history_file:
    json.dump(history.history, history_file)

import matplotlib.pyplot as plt


def plot_metric(historyDic, metric):
    """Plot training vs. validation curves for one metric and save as a PNG.

    Args:
        historyDic: dict as returned by Keras ``History.history`` — must
            contain ``metric`` and ``'val_' + metric`` lists, one value
            per epoch.
        metric: metric name, e.g. ``'loss'`` or ``'accuracy'``.
    """
    train_metrics = historyDic[metric]
    val_metrics = historyDic['val_' + metric]
    # Epochs are 1-based on the x axis.
    epochs = range(1, len(train_metrics) + 1)
    plt.plot(epochs, train_metrics, 'bo--')
    plt.plot(epochs, val_metrics, 'rx-')
    plt.title('Training and validation ' + metric)
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend(["train_" + metric, 'val_' + metric])
    # ''.join(...) on a single string was a no-op; build the path directly.
    plt.savefig('./' + metric + '.png')
    plt.show()


#
# plot_metric(history.history, "loss")
# plot_metric(history.history, "auc")
#
# model.save('./titanic_model.h5')
# json.dump(history.history, open('history.json', 'w'))

# Reload the saved model from disk (round-trip check of model persistence).
model = models.load_model('./titanic_model.h5')
# Use the reloaded model to predict survival probabilities on the test set.
predict_resulit = model.predict(dftest_process)
# print(predict_resulit[-5:])
# Threshold at 0.5: probabilities above 0.5 become 1, otherwise 0.
predict_resulit = np.where(predict_resulit > 0.5, 1, 0)
# print(predict_resulit[-5:])
# print(predict_resulit[-5:])

# pd_resulit = pd.DataFrame()
# # predict_resulit原本为 [418,1]的二维数组，降维为一维数组再赋值为列
# pd_resulit['Survived'] = predict_resulit.reshape(-1)
# pd_resulit['PassengerId'] = dftest_raw['PassengerId']
# # 打印最后5个预测结果
# print(pd_resulit)
# # 保存结果
# pd_resulit.to_csv('predict_result.csv',index=False)


# pd_resulit = pd.DataFrame()
# # predict_resulit原本为 [418,1]的二维数组，降维为一维数组再赋值为列
# pd_resulit['Survived'] = predict_resulit.reshape(-1)
# pd_resulit['PassengerId'] = dftest_raw['PassengerId']
# # 加载验证结果
# pd_groundtrue = pd.read_csv('./ground_truth.csv')
# # 计算预测结果与真实结果每个元素是否相同，然后求平均值
# print(np.mean(np.equal(pd_resulit['Survived'].to_numpy(), pd_groundtrue['Survived'].to_numpy())))
