#!/usr/bin/env python
# -*- coding: utf-8 -*-

# https://blog.csdn.net/u011195077/article/details/87882634 lstm 输入输出的理解
# https://blog.csdn.net/zhylhy520/article/details/86364789 bilstm 函数介绍
"""
    基于Bilstm的文本分类和中文实体抽取:
        1.对于文本分类来说，需要最后一个time_step的输出;
        2.中文实体抽取则需要最终的outputs，即所有time_step的输出;
"""
import tensorflow as tf


class KeywordExtractModel(object):
    """BiLSTM-based sequence tagger for Chinese keyword/entity extraction.

    Per the module header: text classification would only need the last
    time step's output, while entity extraction (this model) consumes the
    outputs of every time step.

    Graph: embedding lookup -> stacked bidirectional dynamic LSTM ->
    per-token fully-connected projection -> sparse softmax cross-entropy
    loss -> Adam updates with global-norm gradient clipping.
    """

    def __init__(self, model_params):
        """Build the entire TF1 graph from a dict of hyperparameters.

        Args:
            model_params: mapping with keys 'class_num', 'max_seq_length',
                'vocab_size', 'word_embedding_size', 'lstm_cell_size',
                'num_layers', 'learning_rate'. Values may be strings; they
                are coerced with int()/float() here.
        """
        self.name = 'tw_model'
        # Threshold for global-norm gradient clipping (see _def_optimize).
        self.max_gradient_norm = 5.0
        self.class_num = int(model_params['class_num'])
        self.MAX_SEQ_LENGTH = int(model_params['max_seq_length'])
        self.vocab_size = int(model_params['vocab_size'])
        self.word_embedding_size = int(model_params['word_embedding_size'])
        self.lstm_cell_size = int(model_params['lstm_cell_size'])
        self.num_layers = int(model_params['num_layers'])
        # Kept as a non-trainable Variable so the rate can be reassigned
        # (e.g. decayed) without rebuilding the graph.
        self.learning_rate = tf.Variable(float(model_params['learning_rate']), name='learning_rate', trainable=False)
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        # Extra ops grouped into the train op after each step (empty by default).
        self._post_train_ops = []

        # Construction order matters: each stage reads tensors that the
        # previous stage attached to `self`.
        self._create_embedders()
        self._bid_lstm_network()
        self._fc()
        self._def_loss()
        self._def_optimize()
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=20)

    def _create_embedders(self):
        """Define input placeholders and the word-embedding lookup."""
        with tf.name_scope('inputs'):
            # Token-id batch: (batch_size, max_seq_length) matrix.
            self._src_input_data = tf.placeholder(tf.int32, [None, self.MAX_SEQ_LENGTH], name='source_sequence')
            # Per-query token count, shape (batch_size,); at most max_seq_length.
            self._src_seq_length = tf.placeholder(tf.int32, [None], name='source_length')
            # Dropout keep probability: the smaller the value, the more units dropped.
            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
            # Per-token target labels: (batch_size, max_seq_length) matrix.
            # NOTE(review): despite the name "pred", this placeholder is fed
            # the ground-truth labels — see its use in _def_loss.
            self.pred = tf.placeholder(tf.int32, [None, self.MAX_SEQ_LENGTH], name="pred")
            # Word embeddings, randomly initialized here (could be swapped for
            # pretrained vectors as an optimization).
            self.word_embedding = tf.get_variable('word_embedding', [self.vocab_size, self.word_embedding_size],
                                                  initializer=tf.random_uniform_initializer(-0.5, 0.5))
            # Embedded inputs: (batch_size, max_seq_length, word_embedding_size).
            self.src_input_distributed = tf.nn.embedding_lookup(self.word_embedding, self._src_input_data,
                                                                name='dist_source')

            # Register feed targets in collections so a reloaded graph can find them.
            tf.add_to_collection('_src_input_data', self._src_input_data)
            tf.add_to_collection('_src_seq_length', self._src_seq_length)
            tf.add_to_collection('keep_prob', self.keep_prob)

    def _bid_lstm_network(self):
        """Stack `num_layers` bidirectional LSTM layers over the embeddings."""
        def lstm_cell(lstm_size, keep_prob=0.75):
            # LSTM cell with output dropout; AUTO_REUSE allows re-entering the
            # same variable scope without a duplicate-variable error.
            cell = tf.nn.rnn_cell.LSTMCell(lstm_size, reuse=tf.AUTO_REUSE)
            drop = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=keep_prob)
            return drop

        # Embedded inputs: (batch_size, max_seq_length, word_embedding_size).
        _src_inputs = self.src_input_distributed
        # Actual token count per example in the batch.
        _src_length = self._src_seq_length
        for layer in range(self.num_layers):
            with tf.variable_scope('src_encoder_{}'.format(layer)) as src_scope:
                src_fw_cell = lstm_cell(self.lstm_cell_size, self.keep_prob)
                src_bw_cell = lstm_cell(self.lstm_cell_size, self.keep_prob)
                # Produces a (forward, backward) pair, each of shape
                # (batch_size, max_seq_length, lstm_cell_size).
                src_lstm_outputs, _ = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw=src_fw_cell,
                    cell_bw=src_bw_cell,
                    inputs=_src_inputs,
                    sequence_length=_src_length,
                    dtype=tf.float32,
                    scope=src_scope)
                # Concatenate both directions:
                # (batch_size, max_seq_length, lstm_cell_size * 2);
                # this also becomes the input of the next stacked layer.
                _src_inputs = tf.concat(src_lstm_outputs, axis=2)
        # Flatten from query level to token level:
        # (batch_size * max_seq_length, lstm_cell_size * 2).
        self.output = tf.reshape(_src_inputs, [-1, self.lstm_cell_size * 2])

    def _fc(self):
        """Project each token's BiLSTM features to class logits."""
        with tf.variable_scope('fc'):
            fc_w = tf.Variable(tf.truncated_normal([self.lstm_cell_size * 2, self.class_num], stddev=0.01))
            fc_b = tf.Variable(tf.constant(1.0, shape=[self.class_num]))
            # Logits: (batch_size * max_seq_length, class_num).
            self._pred = tf.matmul(self.output, fc_w) + fc_b
            # Predicted class id per token: 1-D, length batch_size * max_seq_length.
            self.predict = tf.argmax(self._pred, 1)
            tf.add_to_collection('predict', self.predict)

    def _def_loss(self):
        """Define token-level accuracy and the cross-entropy training loss."""
        with tf.variable_scope('loss'):
            # Compare per-token predictions against the flattened label matrix.
            correct_prediction = tf.equal(tf.cast(self.predict, tf.int32), tf.reshape(self.pred, [-1]))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            # Sparse variant: labels are class ids, not one-hot vectors.
            self.loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(self.pred, [-1]), logits=self._pred))

    def _def_optimize(self):
        """Adam with global-norm gradient clipping; defines `self.train`."""
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        tvars = tf.trainable_variables()
        # Clip all gradients jointly by their global norm to stabilize training.
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), self.max_gradient_norm)
        self.train = optimizer.apply_gradients(list(zip(grads, tvars)), global_step=self.global_step)
        self._add_post_train_ops()

    def _add_post_train_ops(self):
        """Group any registered post-train ops so they run after each step."""
        with tf.control_dependencies([self.train]):
            self.train = tf.group(self.train, *self._post_train_ops)


if __name__ == '__main__':
    # Smoke test: build the graph once with a small demo configuration.
    model_params = dict(
        max_seq_length=50,
        vocab_size=400000,
        class_num=2,
        word_embedding_size=64,
        lstm_cell_size=50,
        num_layers=1,
        train_keep_prob=0.5,
        learning_rate=0.1,
        learning_rate_decay_factor=0.99999,
    )
    model = KeywordExtractModel(model_params)