import matplotlib.pyplot as plt
import datetime
import tensorflow as tf
from tensorflow.keras import layers, backend, models, callbacks


def load_data_line(batch_size=20, buffer_size=1000):
    """Load the tokenized IMDB data and build the train/test input pipelines.

    Each file line is expected to be "<label>\\t<space-separated token ids>".

    :param batch_size: number of examples per batch
    :param buffer_size: shuffle buffer size for tf.data.Dataset.shuffle
    :return: (ds_train, ds_test) — two batched, prefetched tf.data datasets
             yielding (features:int32, label:int32 shape (1,)) pairs
    """
    autotune = tf.data.experimental.AUTOTUNE

    def parse_line(line):
        """Parse one TSV line into an (int token-id tensor, int label) pair."""
        numbers = tf.strings.split(line, '\t')
        label = tf.reshape(tf.cast(tf.strings.to_number(numbers[0]), tf.int32), (-1,))
        features = tf.cast(tf.strings.to_number(tf.strings.split(numbers[1], ' ')), tf.int32)
        return (features, label)

    def make_pipeline(path):
        # Shared pipeline (was duplicated for train/test): parse -> shuffle -> batch -> prefetch.
        return (tf.data.TextLineDataset(filenames=[path])
                .map(parse_line, num_parallel_calls=autotune)
                .shuffle(buffer_size=buffer_size)
                .batch(batch_size=batch_size)
                .prefetch(autotune))

    train_token_path = '../data/imdb/train_token.csv'
    test_token_path = '../data/imdb/test_token.csv'

    return make_pipeline(train_token_path), make_pipeline(test_token_path)


def plot_metric(history, metric):
    """Plot the training vs. validation curves of *metric* from a Keras History.

    :param history: object with a ``history`` dict (as returned by model.fit)
    :param metric: metric name, e.g. 'loss' or 'auc'
    :return: None; displays the figure via plt.show()
    """
    train_metrics = history.history[metric]
    val_metrics = history.history['val_' + metric]
    epochs = range(1, len(train_metrics) + 1)

    # Blue dashed for training, red dashed for validation.
    for series, fmt in ((train_metrics, 'bo--'), (val_metrics, 'ro--')):
        plt.plot(epochs, series, fmt)

    plt.title('Training and validation ' + metric)
    plt.xlabel('Epochs')
    plt.ylabel(metric)
    plt.legend(['train_' + metric, 'val_' + metric])
    plt.show()


def build_model_sequential(train_data, test_data, max_words=10000, max_len=200):
    """Method 1: build and train the classifier with the Sequential API.

    :param train_data: batched tf.data dataset of (features, label) for training
    :param test_data: batched tf.data dataset used as validation data
    :param max_words: vocabulary size (only the top `max_words` tokens are embedded)
    :param max_len: maximum sequence length fed to the embedding layer
    :return: None; trains for 6 epochs, logs to TensorBoard, then plots the AUC curves
    """
    def build_model():
        """Assemble and compile the Sequential CNN classifier."""
        # Reset global Keras state so repeated runs start from scratch.
        backend.clear_session()

        model = models.Sequential()
        model.add(layers.Embedding(input_dim=max_words, output_dim=7, input_length=max_len))
        model.add(layers.Conv1D(filters=64, kernel_size=5, activation='relu'))
        model.add(layers.MaxPool1D(2))
        model.add(layers.Conv1D(filters=32, kernel_size=3, activation='relu'))
        model.add(layers.MaxPool1D(2))
        model.add(layers.Flatten())
        model.add(layers.Dense(1, activation='sigmoid'))

        model.compile(optimizer='Nadam', loss='binary_crossentropy', metrics=['accuracy', 'AUC'])
        return model

    model = build_model()

    # strftime already returns a str; no extra '{}'.format wrapper needed.
    log_name = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    logdir = '../data/models/tf_models_imdb_sequential/{}'.format(log_name)

    tensorboard_callback = callbacks.TensorBoard(log_dir=logdir, histogram_freq=1)
    history = model.fit(train_data, validation_data=test_data, epochs=6, callbacks=[tensorboard_callback])

    # 'auc' is the lower-cased name Keras assigns to the AUC metric in history.
    plot_metric(history=history, metric='auc')


def build_model_api(train_data, test_data, max_words=10000, max_len=200):
    """Method 2: build and train an arbitrary-topology model with the Functional API.

    Three parallel separable-conv branches (kernel sizes 3/5/7) are concatenated
    before the final sigmoid classifier.

    :param train_data: batched tf.data dataset of (features, label) for training
    :param test_data: batched tf.data dataset used as validation data
    :param max_words: vocabulary size (only the top `max_words` tokens are embedded)
    :param max_len: maximum sequence length for the model's input shape
    :return: None; trains for 6 epochs, logs to TensorBoard, then plots the AUC curves
    """
    def build_model():
        """Assemble and compile the multi-branch Functional API model."""
        # Reset global Keras state so repeated runs start from scratch.
        backend.clear_session()

        # Renamed from `input` to avoid shadowing the builtin.
        inputs = layers.Input(shape=[max_len])
        x = layers.Embedding(input_dim=max_words, output_dim=7)(inputs)

        branch1 = layers.SeparableConv1D(filters=64, kernel_size=3, activation='relu')(x)
        branch1 = layers.MaxPool1D(3)(branch1)
        branch1 = layers.SeparableConv1D(filters=32, kernel_size=3, activation='relu')(branch1)
        branch1 = layers.GlobalMaxPool1D()(branch1)

        branch2 = layers.SeparableConv1D(filters=64, kernel_size=5, activation='relu')(x)
        branch2 = layers.MaxPool1D(5)(branch2)
        branch2 = layers.SeparableConv1D(filters=32, kernel_size=5, activation='relu')(branch2)
        branch2 = layers.GlobalMaxPool1D()(branch2)

        branch3 = layers.SeparableConv1D(filters=64, kernel_size=7, activation='relu')(x)
        branch3 = layers.MaxPool1D(7)(branch3)
        branch3 = layers.SeparableConv1D(filters=32, kernel_size=7, activation='relu')(branch3)
        branch3 = layers.GlobalMaxPool1D()(branch3)

        concat = layers.Concatenate()([branch1, branch2, branch3])
        outputs = layers.Dense(units=1, activation='sigmoid')(concat)

        model = models.Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer='Nadam', loss='binary_crossentropy', metrics=['accuracy', 'AUC'])
        return model

    model = build_model()

    # strftime already returns a str; no extra '{}'.format wrapper needed.
    log_name = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    logdir = '../data/models/tf_models_imdb_api/{}'.format(log_name)

    tensorboard_callback = callbacks.TensorBoard(log_dir=logdir, histogram_freq=1)
    history = model.fit(train_data, validation_data=test_data, epochs=6, callbacks=[tensorboard_callback])

    # 'auc' is the lower-cased name Keras assigns to the AUC metric in history.
    plot_metric(history=history, metric='auc')


class ResBlock(layers.Layer):
    """Custom residual block layer.

    Three same-padded Conv1D layers whose output is added back to the input
    (skip connection), followed by MaxPool1D(2).
    """
    def __init__(self, kernel_size, **kwargs):
        """
        :param kernel_size: kernel width shared by all three convolutions
        """
        super(ResBlock, self).__init__(**kwargs)
        self.kernel_size = kernel_size

    def build(self, input_shape):
        self.conv1 = layers.Conv1D(filters=64, kernel_size=self.kernel_size, activation='relu', padding='same')
        self.conv2 = layers.Conv1D(filters=32, kernel_size=self.kernel_size, activation='relu', padding='same')
        # The last conv restores the input channel count so the residual add is shape-valid.
        self.conv3 = layers.Conv1D(filters=input_shape[-1], kernel_size=self.kernel_size, activation='relu', padding='same')

        self.maxpool = layers.MaxPool1D(2)
        # Create the Add layer once here instead of instantiating a new one
        # on every call() invocation (Keras convention: sub-layers are built,
        # not created, in call). Add has no weights, so behavior is unchanged.
        self.add = layers.Add()
        super(ResBlock, self).build(input_shape)  # equivalent to setting self.built = True

    def call(self, inputs):
        x = self.conv1(inputs)
        x = self.conv2(x)
        x = self.conv3(x)

        x = self.add([inputs, x])
        x = self.maxpool(x)
        return x

    def get_config(self):
        """
        Required so that a Functional API model containing this custom layer
        can be serialized/deserialized.
        :return: config dict including kernel_size
        """
        config = super(ResBlock, self).get_config()
        config.update({'kernel_size': self.kernel_size})
        return config


class ImdbModel(models.Model):
    """Custom subclassed model: embedding -> two ResBlocks -> flatten -> sigmoid.

    (The same network could also be expressed with Sequential or the
    Functional API.)
    """
    def __init__(self):
        super(ImdbModel, self).__init__()
        # MAX_WORDS: vocabulary size for the embedding; MAX_LEN: expected sequence length.
        self.MAX_WORDS = 10000
        self.MAX_LEN = 200

    def build(self, input_shape):
        self.embedding = layers.Embedding(input_dim=self.MAX_WORDS, output_dim=7)
        self.block1 = ResBlock(kernel_size=7)
        self.block2 = ResBlock(kernel_size=5)
        # Create Flatten once here instead of instantiating a new layer on
        # every call() invocation (Keras convention; Flatten has no weights,
        # so behavior is unchanged).
        self.flatten = layers.Flatten()
        self.dense = layers.Dense(units=1, activation='sigmoid')
        super(ImdbModel, self).build(input_shape)

    def call(self, x):
        x = self.embedding(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.flatten(x)
        x = self.dense(x)
        return x


def build_model_class(train_data, test_data):
    """Method 3: build and train a model defined by subclassing Model.

    :param train_data: batched tf.data dataset of (features, label) for training
    :param test_data: batched tf.data dataset used as validation data
    :return: None; trains for 6 epochs, logs to TensorBoard, then plots the AUC curves
    """

    def build_model():
        """Instantiate, build, and compile the subclassed ImdbModel."""
        # Reset global Keras state so repeated runs start from scratch.
        backend.clear_session()

        model = ImdbModel()
        # Subclassed models have no static graph; build explicitly for
        # (batch, 200) inputs so weights exist before compile/fit.
        model.build(input_shape=(None, 200))

        model.compile(optimizer='Nadam', loss='binary_crossentropy', metrics=['accuracy', 'AUC'])
        return model

    model = build_model()

    # strftime already returns a str; no extra '{}'.format wrapper needed.
    log_name = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    logdir = '../data/models/tf_models_imdb_class/{}'.format(log_name)

    tensorboard_callback = callbacks.TensorBoard(log_dir=logdir, histogram_freq=1)
    history = model.fit(train_data, validation_data=test_data, epochs=6, callbacks=[tensorboard_callback])

    # 'auc' is the lower-cased name Keras assigns to the AUC metric in history.
    plot_metric(history=history, metric='auc')


def run():
    """Entry point: build the IMDB data pipelines and train one model variant."""
    ds_train, ds_test = load_data_line()

    # Three equivalent ways to define the network; only method 3 is active.
    # Method 1: Sequential, layer by layer
    # build_model_sequential(train_data=ds_train, test_data=ds_test)

    # Method 2: Functional API for arbitrary topologies
    # build_model_api(train_data=ds_train, test_data=ds_test)

    # Method 3: Model subclassing with custom layers
    build_model_class(train_data=ds_train, test_data=ds_test)


# Script entry point: only train when executed directly, not on import.
if __name__ == '__main__':
    run()