# -*- coding: utf-8 -*-
# !/usr/bin/python3
"""
Author :      wu
Description :
"""

import datetime
import os

import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


def prepare_df_data(df_raw: pd.DataFrame) -> pd.DataFrame:
    """Clean the raw Titanic frame for model consumption.

    Lower-cases column names, renames ``survived`` to ``label``, drops the
    identifier columns, and imputes missing values.  For every column that
    contains NaNs a companion ``<col>_nan`` indicator column is added
    (1 where the value was missing); numeric columns are filled with their
    mean, non-numeric ones with the empty string.

    Args:
        df_raw: raw frame as read from ``train.csv`` (must contain
            ``Survived``, ``PassengerId`` and ``Name`` columns).

    Returns:
        A new, cleaned DataFrame; ``df_raw`` itself is left untouched.
    """
    df_data = df_raw.copy()
    df_data.columns = [x.lower() for x in df_data.columns]
    df_data = df_data.rename(columns={"survived": "label"})
    df_data = df_data.drop(columns=["passengerid", "name"])

    # Iterate over a snapshot of the dtypes so the "_nan" indicator columns
    # added inside the loop are not themselves re-visited.
    for col, dtype in dict(df_data.dtypes).items():
        if df_data[col].hasnans:
            df_data[col + "_nan"] = pd.isna(df_data[col]).astype("int32")
            # BUGFIX: the original compared against np.unicode_, which was
            # removed in NumPy 2.0 (AttributeError).  pandas' own dtype
            # predicate expresses the intent ("numeric vs. text") directly.
            if pd.api.types.is_numeric_dtype(dtype):
                # Plain assignment instead of inplace fillna: chained
                # inplace fillna is deprecated under pandas copy-on-write.
                df_data[col] = df_data[col].fillna(df_data[col].mean())
            else:
                df_data[col] = df_data[col].fillna("")

    return df_data


def df_to_dataset(df: pd.DataFrame, shuffle=True, batch_size=16):
    """Wrap a prepared DataFrame in a batched ``tf.data.Dataset``.

    If a ``label`` column is present it is split off and paired with the
    features as ``(features_dict, labels)``; otherwise the dataset yields
    feature dicts only (inference mode).

    Args:
        df: prepared frame (output of ``prepare_df_data`` or a slice of it).
        shuffle: whether to shuffle with a buffer covering the whole frame.
        batch_size: number of rows per batch.

    Returns:
        A batched ``tf.data.Dataset``.
    """
    features = df.copy()

    if "label" in features.columns:
        labels = features.pop("label")
        dataset = tf.data.Dataset.from_tensor_slices(
            (features.to_dict(orient="list"), labels)
        )
    else:
        dataset = tf.data.Dataset.from_tensor_slices(
            features.to_dict(orient="list")
        )

    if shuffle:
        dataset = dataset.shuffle(buffer_size=len(features))

    return dataset.batch(batch_size)


def build_feature_columns(df_data: pd.DataFrame):
    """Build the list of ``tf.feature_column`` transforms for the model.

    Args:
        df_data: prepared frame from ``prepare_df_data`` (lower-case column
            names plus the generated ``*_nan`` indicator columns).

    Returns:
        A list of feature columns to feed ``layers.DenseFeatures``.

    NOTE(review): ``tf.feature_column`` is deprecated in recent TF releases
    in favour of Keras preprocessing layers — worth migrating eventually.
    """

    feature_columns = []

    # Numeric columns (including the generated missing-value indicators).
    columns = ["age", "fare", "parch", "sibsp"] +\
              [i for i in df_data.columns if i.endswith("_nan")]
    for col in columns:
        feature_columns.append(tf.feature_column.numeric_column(col))

    # Bucketized age.
    age = tf.feature_column.numeric_column("age")
    age_buckets = tf.feature_column.bucketized_column(age,
                                                      [18, 25, 30, 35, 40, 45, 50, 55, 60, 65]
                                                      )
    feature_columns.append(age_buckets)

    # Categorical (one-hot) columns.
    sex = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_vocabulary_list(
            key="sex", vocabulary_list=["male", "female"]
        )
    )
    feature_columns.append(sex)

    pclass = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_vocabulary_list(
            key="pclass", vocabulary_list=[1, 2, 3]
        )
    )
    feature_columns.append(pclass)

    ticket = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_hash_bucket("ticket", 3)
    )
    feature_columns.append(ticket)

    # BUGFIX: embarkation ports in the Titanic data are "S", "C" and "Q".
    # The original vocabulary listed "B", which never occurs, so every "Q"
    # passenger silently fell into the all-zero out-of-vocabulary bucket.
    embarked = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_vocabulary_list(
            key="embarked", vocabulary_list=["S", "C", "Q"]
        )
    )
    feature_columns.append(embarked)

    # Embedding column: hash cabin into 32 buckets, embed into 2 dims.
    cabin = tf.feature_column.embedding_column(
        tf.feature_column.categorical_column_with_hash_bucket("cabin", 32), 2
    )
    feature_columns.append(cabin)

    # Crossed column: age bucket x passenger class.
    pclass_rate = tf.feature_column.categorical_column_with_vocabulary_list(
        key="pclass", vocabulary_list=[1, 2, 3]
    )
    crossed_feature = tf.feature_column.indicator_column(
        tf.feature_column.crossed_column([age_buckets, pclass_rate], hash_bucket_size=15)
    )
    feature_columns.append(crossed_feature)

    return feature_columns


def plot_metric(history, metric: str):
    """Plot the training vs. validation curve of *metric* from a Keras
    ``History`` object and show the figure."""
    train_values = history.history[metric]
    val_values = history.history["val_" + metric]
    epoch_axis = range(1, len(train_values) + 1)

    plt.plot(epoch_axis, train_values, "bo--")
    plt.plot(epoch_axis, val_values, "ro--")
    plt.title("training and validation " + metric)
    plt.xlabel("epochs")
    plt.ylabel(metric)
    plt.legend(["train_" + metric, "val_" + metric])
    plt.show()


def main():
    """Train and evaluate a binary survival classifier on the Titanic data."""
    raw = pd.read_csv("../data/titanic/train.csv")
    raw = raw.sample(frac=1).reset_index(drop=True)  # shuffle row order

    split = int(len(raw) * 0.7)
    df_train_raw = raw.iloc[:split, :]
    df_test_raw = raw.iloc[split:, :]

    # Preprocess train and test together so imputation means and the
    # generated *_nan indicator columns are consistent across both splits.
    prepared = pd.concat([df_train_raw, df_test_raw])
    prepared = prepare_df_data(prepared)
    df_train = prepared.iloc[:len(df_train_raw), :]
    df_test = prepared.iloc[len(df_train_raw):, :]

    ds_train = df_to_dataset(df_train)
    ds_test = df_to_dataset(df_test)

    feature_columns = build_feature_columns(prepared)

    tf.keras.backend.clear_session()
    model = tf.keras.Sequential([
        layers.DenseFeatures(feature_columns),
        layers.Dense(64, activation="relu"),
        layers.Dense(32, activation="relu"),
        layers.Dense(1, activation="sigmoid"),
    ])
    model.compile(
        optimizer="Adam",
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )
    history = model.fit(ds_train, validation_data=ds_test, epochs=20)
    model.summary()
    plot_metric(history, "accuracy")


if __name__ == "__main__":
    main()