# -- encoding:utf-8 --

import tensorflow as tf
from tensorflow.contrib import crf


class TextRNNCRF(object):
    """
    Bidirectional multi-layer LSTM encoder with a CRF output layer.

    Despite the original one-line description ("text classification"),
    the graph built here performs per-timestep sequence tagging: every
    timestep receives a score over ``num_class`` tags, a CRF computes the
    sequence log-likelihood as the training loss, and CRF (Viterbi)
    decoding produces the predicted tag sequence.

    NOTE(review): written against TensorFlow 1.x (``tf.contrib.crf``,
    ``tf.variable_scope``, ``tf.layers.dense``); it will not run
    unmodified on TF 2.x.

    Shape conventions used in the comments below (carried over from the
    original annotations): N = batch size, T = max sequence length,
    E = embedding size, U = num_units.
    """

    def __init__(self, network_name, num_units, layers,
                 sequence_input_embedding, sequence_target_label,
                 num_class, lengths, keep_prob=1.0, *args, **kwargs):
        # Graph construction happens entirely in __init__ (TF1 style).
        #
        # Args:
        #   network_name: root variable-scope name for all variables/ops
        #       created by this network.
        #   num_units: number of hidden units per LSTM cell.
        #   layers: number of stacked LSTM layers (per direction).
        #   sequence_input_embedding: embedded input tensor, shape [N, T, E].
        #   sequence_target_label: gold tag indices, shape [N, T].
        #   num_class: number of output tag classes.
        #   lengths: true (unpadded) length of each sequence, shape [N].
        #   keep_prob: dropout keep probability applied to the BiLSTM
        #       output; default 1.0 disables dropout.
        #   *args, **kwargs: accepted and ignored (kept for caller
        #       compatibility).
        self.network_name = network_name  # network / variable-scope name
        self.num_units = num_units  # number of units per RNN cell
        self.layers = layers  # number of stacked RNN layers
        self.lengths = lengths
        self.embedded_chars = sequence_input_embedding  # [N,T,E]
        self.sequence_labels = sequence_target_label  # [N,T]
        self.num_class = num_class

        with tf.variable_scope(self.network_name):
            # 2. Build RNN output
            with tf.variable_scope("rnn"):
                # a. Build the RNN structure: one stacked LSTM
                #    (MultiRNNCell of BasicLSTMCells) per direction.
                def cell(num_units):
                    return tf.nn.rnn_cell.BasicLSTMCell(num_units)

                cell_fw = tf.nn.rnn_cell.MultiRNNCell(cells=[cell(self.num_units) for _ in range(self.layers)])
                cell_bw = tf.nn.rnn_cell.MultiRNNCell(cells=[cell(self.num_units) for _ in range(self.layers)])

                # b. Run the bidirectional RNN over the embedded input.
                # outputs: tuple, ([N,T,U], [N,T,U]) — one tensor per direction
                (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw,  # forward RNN cell
                    cell_bw,  # backward RNN cell
                    inputs=self.embedded_chars,  # RNN input, shape [N, T, E]
                    dtype=tf.float32  # dtype of the (default zero) initial state
                )

                # c. Every timestep is classified, so the full per-timestep
                #    outputs are kept; concatenate both directions.
                output = tf.concat([output_fw, output_bw], axis=-1)  # ([N,T,U], [N,T,U]) -> [N,T,2U]

                # d. Dropout on the encoder output.
                # NOTE(review): keep_prob is fixed at graph-build time, so the
                # same rate applies to training and inference — confirm callers
                # pass 1.0 (or a placeholder) when evaluating.
                h_drop = tf.nn.dropout(output, keep_prob=keep_prob)

            # 3. Build FC output
            with tf.variable_scope("fc"):
                # [N,T,num_class] — per-timestep unnormalized scores (logits)
                # over the tag classes.
                self.scores = tf.layers.dense(inputs=h_drop, units=self.num_class)

                # CRF transition matrix: transition_params[i, j] scores a
                # transition from tag i to tag j.
                self.transition_params = tf.get_variable(name='transition_params',
                                                         shape=[self.num_class, self.num_class])
                # NOTE(review): this "loss" scope is nested inside "fc"
                # (full scope "<network_name>/fc/loss"), distinct from the
                # top-level "<network_name>/loss" scope below.
                with tf.variable_scope("loss"):
                    # log_likelihood: per-example log-likelihood; negating it
                    #   yields the per-example loss.
                    # transition_params: the transition matrix used/trained by
                    #   the CRF — the variable passed in is returned back.
                    log_likelihood, self.transition_params = crf.crf_log_likelihood(
                        inputs=self.scores,  # per-timestep tag scores, shape [N,T,num_tags]
                        tag_indices=self.sequence_labels,  # gold tag indices, shape [N,T]
                        sequence_lengths=self.lengths,  # true sequence lengths, shape [N]
                        transition_params=self.transition_params  # tag-to-tag transition scores, shape [num_tags, num_tags]
                    )
                    # Negative log-likelihood as the per-example loss.
                    self.per_example_loss = -log_likelihood
                    # Register the mean loss in the graph's losses collection
                    # so tf.losses.get_total_loss() below can pick it up.
                    tf.losses.add_loss(tf.reduce_mean(self.per_example_loss))

                # CRF (Viterbi) decoding for the final prediction.
                # decode_tags: decoded tag indices, [N,T]
                # best_score: score of the decoded tag sequence, [N]
                decode_tags, best_score = crf.crf_decode(
                    potentials=self.scores,  # per-timestep tag scores, shape [N,T,num_tags]
                    transition_params=self.transition_params,  # tag-to-tag transition scores, shape [num_tags, num_tags]
                    sequence_length=self.lengths  # true sequence lengths, shape [N]
                )
                self.predictions = tf.identity(decode_tags, "predictions")  # named alias (e.g. for serving / tensor lookup)

            # 4. Build Loss
            with tf.variable_scope("loss"):
                # Total loss = everything registered in the losses collection
                # (the CRF negative log-likelihood added above, plus any
                # regularization losses). The original comment here mentioned
                # cross-entropy, but no cross-entropy is computed in this
                # block — the loss is the CRF NLL.
                self.total_loss = tf.losses.get_total_loss(name='total_loss')
