#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
    __author__ = "wu"
    Description : Train a 1-D CNN text classifier on the IMDB dataset using a
    custom TensorFlow 2 training loop.
"""

import re
import string

import numpy as np
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras import models, layers, preprocessing, optimizers, losses, metrics
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import tensorflow_datasets as tfds


# Reset Keras' global state so layer name counters start fresh on re-runs.
tf.keras.backend.clear_session()

MAX_WORDS = 10000  # vocabulary size cap for the TextVectorization layer
MAX_LEN = 200  # fixed sequence length texts are padded/truncated to
BATCH_SIZE = 2  # mini-batch size used by the tf.data pipelines


class CnnModel(models.Model):
    """1-D CNN for binary text classification on integer token sequences.

    Expects inputs of shape (batch, MAX_LEN) of token ids in [0, MAX_WORDS);
    outputs a sigmoid probability of shape (batch, 1).
    """

    def __init__(self):
        super(CnnModel, self).__init__()

    def build(self, input_shape):
        """Create the layers lazily once the input shape is known."""
        self.embedding = layers.Embedding(MAX_WORDS, 7, input_length=MAX_LEN)
        self.conv_1 = layers.Conv1D(16, kernel_size=5, name="conv_1", activation="relu")
        self.pool_1 = layers.MaxPool1D(name="pool_1")
        self.conv_2 = layers.Conv1D(128, kernel_size=3, name="conv_2", activation="relu")
        self.pool_2 = layers.MaxPool1D(name="pool_2")
        self.flatten = layers.Flatten()
        self.dense = layers.Dense(1, activation="sigmoid")
        super(CnnModel, self).build(input_shape)

    def call(self, x):
        """Forward pass: embed -> (conv, pool) x2 -> flatten -> sigmoid."""
        pipeline = (self.embedding,
                    self.conv_1, self.pool_1,
                    self.conv_2, self.pool_2,
                    self.flatten, self.dense)
        out = x
        for layer in pipeline:
            out = layer(out)
        return out

    def summary(self):
        """Print a layer-by-layer summary via a temporary functional wrapper."""
        x_input = layers.Input(shape=MAX_LEN)
        wrapper = tf.keras.Model(inputs=x_input, outputs=self.call(x_input))
        wrapper.summary()


def split_line(line):
    """Parse one tab-separated "label<TAB>text" line into (text, label) tensors.

    The label is the number before the tab, cast to int32; both outputs get a
    leading axis of size 1 via expand_dims.
    """
    parts = tf.strings.split(line, "\t")
    label = tf.expand_dims(tf.cast(tf.strings.to_number(parts[0]), tf.int32), axis=0)
    text = tf.expand_dims(parts[1], axis=0)
    return text, label


def clean_text(text):
    """Normalize raw text: lowercase, drop <br /> tags, strip punctuation."""
    cleaned = tf.strings.lower(text)
    cleaned = tf.strings.regex_replace(cleaned, "<br />", "")
    punct_pattern = "[%s]" % re.escape(string.punctuation)
    return tf.strings.regex_replace(cleaned, punct_pattern, "")


@tf.function
def printbar():
    """Print a separator bar followed by the current wall-clock time.

    Time is rendered as HH:MM:SS with a hard-coded +8 hour offset
    (presumably UTC+8 / China Standard Time — confirm against deployment TZ).
    """
    today_ts = tf.timestamp() % (24 * 60 * 60)
    hour = tf.cast(today_ts // 3600 + 8, tf.int32) % tf.constant(24)
    # BUG FIX: the original computed minutes as (ts % 3600), which is the
    # number of SECONDS within the hour (0..3599); divide by 60 for minutes.
    minute = tf.cast((today_ts % 3600) // 60, tf.int32)
    second = tf.cast(tf.floor(today_ts % 60), tf.int32)

    def timeformat(m):
        # Zero-pad single-digit components so output is always two digits.
        if tf.strings.length(tf.strings.format("{}", m)) == 1:
            return tf.strings.format("0{}", m)
        else:
            return tf.strings.format("{}", m)

    timestring = tf.strings.join([timeformat(hour), timeformat(minute), timeformat(second)],
                                 separator=":")
    tf.print("==" * 8 + timestring)


# Shared training state used by train_step / valid_step / train_model below.
optimizer = optimizers.Nadam()
loss_func = losses.BinaryCrossentropy()

# Running metrics accumulated across batches; reset once per epoch.
train_loss = metrics.Mean(name="train_loss")
train_metric = metrics.BinaryAccuracy(name="train_accuracy")

valid_loss = metrics.Mean(name="valid_loss")
valid_metric = metrics.BinaryAccuracy(name="valid_accuracy")


@tf.function
def train_step(model, features, labels):
    """Run a single optimization step and accumulate the training metrics."""
    with tf.GradientTape() as tape:
        preds = model(features, training=True)
        batch_loss = loss_func(labels, preds)

    grads = tape.gradient(batch_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    train_loss.update_state(batch_loss)
    train_metric.update_state(labels, preds)


@tf.function
def valid_step(model, features, labels):
    """Evaluate one batch (no gradient update) and accumulate valid metrics."""
    preds = model(features, training=False)
    valid_loss.update_state(loss_func(labels, preds))
    valid_metric.update_state(labels, preds)


def train_model(model, ds_train, ds_valid, epochs):
    """Custom training loop: train and validate for `epochs`, logging each epoch.

    Uses the module-level metric objects, which are reset at the end of every
    epoch so results reflect that epoch only.
    """
    log_template = "Epoch={}, loss:{}, accuracy:{}, valid_loss:{}, valid_accuracy:{}"

    for epoch in tf.range(1, epochs + 1):
        for features, labels in ds_train:
            train_step(model, features, labels)

        for features, labels in ds_valid:
            valid_step(model, features, labels)

        # Log every epoch.
        printbar()
        tf.print(tf.strings.format(
            log_template,
            (epoch, train_loss.result(), train_metric.result(),
             valid_loss.result(), valid_metric.result())))
        tf.print("")

        for metric in (train_loss, train_metric, valid_loss, valid_metric):
            metric.reset_states()


def evaluate_model(model, ds_valid):
    """Evaluate `model` over `ds_valid`, print the results, reset metrics.

    Accumulates into the module-level valid_loss / valid_metric objects and
    clears them (plus train_metric) afterwards so later runs start clean.
    """
    for features, labels in ds_valid:
        valid_step(model, features, labels)

    logs = "valid loss:{}, valid accuracy:{}"
    tf.print(tf.strings.format(logs, (valid_loss.result(), valid_metric.result())))

    # BUG FIX: the original called `resset_states()` (misspelled) on all three
    # metrics, which raises AttributeError at runtime; the Keras metric API
    # method is reset_states().
    valid_loss.reset_states()
    valid_metric.reset_states()

    train_metric.reset_states()


def main():
    """End-to-end pipeline: load IMDB CSVs, fit vocab, train the CNN, save it."""

    def make_raw_dataset(path):
        # One (text, label) example per tab-separated line, shuffled and batched.
        return tf.data.TextLineDataset(filenames=[path]) \
            .map(split_line, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
            .shuffle(buffer_size=100) \
            .batch(BATCH_SIZE) \
            .prefetch(tf.data.experimental.AUTOTUNE)

    train_data_path = r"..\data\imdb\train.csv"
    test_data_path = r"..\data\imdb\test.csv"
    df_train_raw = make_raw_dataset(train_data_path)
    df_test_raw = make_raw_dataset(test_data_path)

    vectorize_layer = TextVectorization(
        standardize=clean_text,
        split="whitespace",
        max_tokens=MAX_WORDS - 1,
        output_mode="int",
        output_sequence_length=MAX_LEN)

    # Fit the vocabulary on the raw training texts only, then peek at it.
    vectorize_layer.adapt(df_train_raw.map(lambda text, label: text))
    print(vectorize_layer.get_vocabulary()[:100])

    # Map raw text to padded integer id sequences for both splits.
    ds_train = df_train_raw.map(lambda text, label: (vectorize_layer(text), label)) \
        .prefetch(tf.data.experimental.AUTOTUNE)
    ds_test = df_test_raw.map(lambda text, label: (vectorize_layer(text), label)) \
        .prefetch(tf.data.experimental.AUTOTUNE)

    model = CnnModel()
    model.build(input_shape=(None, MAX_LEN))
    model.summary()

    train_model(model, ds_train, ds_test, epochs=2)
    # Alternative training path using the built-in fit loop:
    # model.compile(optimizer=optimizers.Nadam(), loss=losses.BinaryCrossentropy(),
    #               metrics=["accuracy"])
    # model.fit(ds_train, epochs=2)

    model.save("../model/tf_model_saved_model", save_format="tf")


# Script entry point.
if __name__ == '__main__':
    main()

