import tensorflow as tf
from tensorflow.keras import models, layers, preprocessing, optimizers, losses, metrics
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import re, string, os
from utils.time_print import printbar


class CnnModel(models.Model):
    """A small 1-D CNN for binary text classification.

    Expects integer token-id sequences of length ``MAX_LEN`` with ids in
    ``[0, MAX_WORDS)`` and outputs one sigmoid probability per sample.
    """

    def __init__(self):
        super(CnnModel, self).__init__()
        self.MAX_WORDS = 10000  # only the 10000 most frequent words are kept
        self.MAX_LEN = 200  # every sample is padded/truncated to 200 tokens

    def build(self, input_shape):
        # Layers are created lazily here so the model can be (re)built for an
        # explicit input shape via model.build((None, MAX_LEN)).
        self.embedding = layers.Embedding(self.MAX_WORDS, 7, input_length=self.MAX_LEN)
        self.conv_1 = layers.Conv1D(16, kernel_size=5, name='conv_1', activation='relu')
        self.pool_1 = layers.MaxPool1D(name='pool_1')
        self.conv_2 = layers.Conv1D(128, kernel_size=2, name='conv_2', activation='relu')
        self.pool_2 = layers.MaxPool1D(name='pool_2')
        self.flatten = layers.Flatten()
        self.dense = layers.Dense(1, activation='sigmoid')
        super(CnnModel, self).build(input_shape=input_shape)

    def call(self, x):
        """Forward pass: embed -> (conv, pool) x2 -> flatten -> sigmoid."""
        x = self.embedding(x)
        x = self.conv_1(x)
        x = self.pool_1(x)
        x = self.conv_2(x)
        x = self.pool_2(x)
        x = self.flatten(x)
        x = self.dense(x)
        return x

    def summary(self):
        """Print a Keras summary by tracing the model on a symbolic input."""
        # BUGFIX: `shape` describes one sample (batch dim excluded) and is
        # documented as a tuple; a bare int relied on implicit conversion
        # that not all Keras versions perform.
        x_input = layers.Input(shape=(self.MAX_LEN,))
        out_put = self.call(x=x_input)
        model = tf.keras.Model(x_input, out_put)
        model.summary()


class ImdbModel(object):
    """Train and evaluate a :class:`CnnModel` on the IMDB sentiment data.

    The train/test files are tab-separated lines of the form
    ``<label>\t<review text>`` with label 0/1.
    """

    def __init__(self):
        self.MAX_WORDS = 10000  # vocabulary size: only the 10000 most frequent words
        self.MAX_LEN = 200  # each sample is padded/truncated to 200 tokens
        self.BATCH_SIZE = 20
        data_path = '../data/imdb'
        self.TRAIN_DATA_PATH = os.path.join(data_path, 'train.csv')
        self.TEST_DATA_PATH = os.path.join(data_path, 'test.csv')
        self.df_train, self.df_test = self.preprocess()

        self.optimizer = optimizers.Nadam()
        self.loss_func = losses.BinaryCrossentropy()
        self.train_loss = metrics.Mean(name='train_loss')
        self.train_metric = metrics.BinaryAccuracy(name='train_accuracy')
        self.valid_loss = metrics.Mean(name='valid_loss')
        self.valid_metric = metrics.BinaryAccuracy(name='valid_accuracy')

        self.model = CnnModel()

    def preprocess(self, buffer_size=1000):
        """Build the train and test ``tf.data`` pipelines.

        :param buffer_size: shuffle buffer size for the training set
        :return: ``(df_train, df_test)`` datasets yielding (token_ids, label) batches
        """
        def split_line(line):
            """Split one 'label<TAB>text' line into (text, label) tensors."""
            arr = tf.strings.split(line, '\t')
            label = tf.expand_dims(tf.cast(tf.strings.to_number(arr[0]), tf.int32), axis=0)
            text = tf.expand_dims(arr[1], axis=0)
            return text, label

        def clean_text(text):
            """Lowercase, drop <br /> tags and strip all punctuation."""
            lowercase = tf.strings.lower(text)
            stripped_html = tf.strings.regex_replace(input=lowercase, pattern='<br />', rewrite=' ')
            return tf.strings.regex_replace(input=stripped_html, pattern='[{}]'.format(re.escape(string.punctuation)),
                                            rewrite='')

        df_train_raw = tf.data.TextLineDataset(filenames=[self.TRAIN_DATA_PATH]).\
            map(split_line, num_parallel_calls=tf.data.experimental.AUTOTUNE).\
            shuffle(buffer_size=buffer_size).\
            batch(batch_size=self.BATCH_SIZE).\
            prefetch(tf.data.experimental.AUTOTUNE)
        df_test_raw = tf.data.TextLineDataset(filenames=[self.TEST_DATA_PATH]).\
            map(split_line, num_parallel_calls=tf.data.experimental.AUTOTUNE).\
            batch(batch_size=self.BATCH_SIZE).\
            prefetch(tf.data.experimental.AUTOTUNE)

        # BUGFIX: max_tokens bounds the VOCABULARY, so it must derive from
        # MAX_WORDS (vocab size), not MAX_LEN (sequence length); one slot is
        # reserved for the padding placeholder, hence "- 1". This keeps token
        # ids strictly below Embedding's input_dim of MAX_WORDS.
        vectorize_layer = TextVectorization(standardize=clean_text, split='whitespace',
                                            max_tokens=self.MAX_WORDS - 1,
                                            output_mode='int', output_sequence_length=self.MAX_LEN)
        # BUGFIX: the vocabulary must be learned from the training text before
        # the layer can map words to ids; without adapt() every word is OOV.
        vectorize_layer.adapt(df_train_raw.map(lambda text, label: text))

        df_train = df_train_raw.map(lambda text, label: (vectorize_layer(text), label)).\
            prefetch(tf.data.experimental.AUTOTUNE)

        df_test = df_test_raw.map(lambda text, label: (vectorize_layer(text), label)). \
            prefetch(tf.data.experimental.AUTOTUNE)
        return df_train, df_test

    @tf.function
    def train_step(self, features, labels):
        """One optimization step: forward, loss, backward, metric update."""
        with tf.GradientTape() as tape:
            predictions = self.model(features, training=True)
            loss = self.loss_func(labels, predictions)
        gradients = tape.gradient(loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))

        self.train_loss.update_state(loss)
        self.train_metric.update_state(labels, predictions)

    @tf.function
    def valid_step(self, features, labels):
        """One evaluation step: forward pass only, no gradient update."""
        # BUGFIX: inference must run with training=False so layers such as
        # dropout/batch-norm (if added later) behave in inference mode.
        predictions = self.model(features, training=False)
        batch_loss = self.loss_func(labels, predictions)

        self.valid_loss.update_state(batch_loss)
        self.valid_metric.update_state(labels, predictions)

    def train_model(self, epochs):
        """Train for ``epochs`` epochs, validating on the test set each epoch.

        :param epochs: number of passes over the training data
        """
        for epoch in tf.range(1, epochs + 1):
            for features, label in self.df_train:
                self.train_step(features=features, labels=label)

            for features, label in self.df_test:
                self.valid_step(features=features, labels=label)

            logs = 'Epoch = {}, Loss: {}, Accuracy: {}, Valid Loss: {}, Valid Accuracy: {}'
            if epoch % 2 == 0:  # only log every second epoch
                printbar()
                tf.print(tf.strings.format(logs,
                                           (epoch, self.train_loss.result(), self.train_metric.result(),
                                            self.valid_loss.result(), self.valid_metric.result())))
                tf.print('')

            # Metrics accumulate across batches, so reset them between epochs.
            self.train_loss.reset_states()
            self.valid_loss.reset_states()
            self.train_metric.reset_states()
            self.valid_metric.reset_states()

    def evalute_mode(self):
        """Evaluate on the test set and print validation loss/accuracy.

        NOTE(review): the misspelled name ("evalute") is kept for caller
        compatibility.
        """
        for features, labels in self.df_test:
            self.valid_step(features=features, labels=labels)
        logs = 'Valid Loss: {}, Valid Accuracy: {}'
        tf.print(tf.strings.format(logs,
                                   (self.valid_loss.result(), self.valid_metric.result())))
        # BUGFIX: only validation metrics are used here; the original reset
        # train_metric (copy-paste slip) instead of just the valid state.
        self.valid_loss.reset_states()
        self.valid_metric.reset_states()

    def main(self):
        """Build, train, evaluate and predict end to end."""
        # Build the model for a (batch, MAX_LEN) integer input.
        self.model.build(input_shape=(None, self.MAX_LEN))
        # Train.
        self.train_model(epochs=20)
        # Evaluate.
        self.evalute_mode()
        # Predict.
        predict = self.model.predict(self.df_test)
        print('predict: {}'.format(predict))
        # Save and load (kept as a reference snippet, intentionally disabled).
        """
        model_path = './data/models/tf_model_imdb'
        self.model.save(model_path, save_format="tf")
        model_loaded = tf.keras.models.load_model(model_path)
        """


def run():
    """Module entry point: construct an ImdbModel and execute its pipeline."""
    ImdbModel().main()


if __name__ == '__main__':
    # Allow the module to be executed directly as a training script.
    run()
