import tensorflow as tf
import numpy as np
from tensorflow_addons.text.crf import crf_log_likelihood, viterbi_decode

# tf.placeholder() is not compatible with eager execution, so switch to TF1 graph mode.
tf.compat.v1.disable_eager_execution()
# Rebind `tf` to the v1 compat namespace so the rest of the file can use TF1-style APIs.
tf = tf.compat.v1


def network(inputs, shapes, num_tags, lstm_dim=100, initializer=tf.truncated_normal_initializer()):
    """
    Build the feature-embedding + 2-layer BiLSTM + projection sub-graph.

    :param inputs: dict mapping feature name -> int32 id tensor of shape [batch, time]
                   (all features are assumed to share the same [batch, time] shape —
                   TODO confirm with the caller)
    :param shapes: dict mapping feature name -> [vocab_size, embed_dim] used as the
                   shape of that feature's embedding table
    :param num_tags: number of output tags (size of the per-step logit vector)
    :param lstm_dim: hidden size of each directional LSTM cell
    :param initializer: initializer for embedding tables and projection weights
    :return: (logits of shape [batch, time, num_tags], per-sentence true lengths [batch])
    """

    # ---------------------- feature embedding --------------------------
    # Look up each feature's ids in its own embedding table, then concatenate
    # all feature embeddings along the last axis into one vector per time step.
    embedding = []
    keys = list(shapes.keys())
    for key in keys:
        with tf.variable_scope(key + '_embedding'):
            lookup = tf.get_variable(
                name=key + '_embedding',
                shape=shapes[key],
                initializer=initializer
            )
            embedding.append(tf.nn.embedding_lookup(lookup, inputs[key]))
    embed = tf.concat(embedding, axis=-1)

    # Non-zero ids mark real tokens; assumes id 0 is the padding token — TODO confirm.
    sign = tf.sign(tf.abs(inputs[keys[0]]))
    lengths = tf.reduce_sum(sign, reduction_indices=1)  # true length of each sentence
    num_time = tf.shape(inputs[keys[0]])[1]

    # ---------------------- recurrent encoding --------------------------
    # contrib was removed after TensorFlow 2.0, so rnn.BasicLSTMCell became
    # tf.nn.rnn_cell.BasicLSTMCell.
    with tf.variable_scope('BiLstm_layer1'):
        lstm_cell = {}
        for name in ['forward', 'backward']:
            with tf.variable_scope(name):
                lstm_cell[name] = tf.nn.rnn_cell.BasicLSTMCell(
                    lstm_dim
                    # BasicLSTMCell has no `initializer` argument, so it is omitted
                    # ,initializer=initializer
                )
        outputs1, finial_states1 = tf.nn.bidirectional_dynamic_rnn(
            lstm_cell['forward'],
            lstm_cell['backward'],
            embed,
            dtype=tf.float32,
            sequence_length=lengths
        )
    # Concatenate forward/backward outputs -> [batch, time, 2*lstm_dim].
    outputs1 = tf.concat(outputs1, axis=-1)
    with tf.variable_scope('BiLstm_layer2'):
        lstm_cell = {}
        for name in ['forward', 'backward']:
            with tf.variable_scope(name):
                lstm_cell[name] = tf.nn.rnn_cell.BasicLSTMCell(
                    lstm_dim
                    # BasicLSTMCell has no `initializer` argument, so it is omitted
                    # ,initializer=initializer
                )
        outputs, finial_states1 = tf.nn.bidirectional_dynamic_rnn(
            lstm_cell['forward'],
            lstm_cell['backward'],
            # the second layer consumes the first layer's output, not the raw embeddings
            outputs1,
            dtype=tf.float32,
            sequence_length=lengths
        )
    output = tf.concat(outputs, axis=-1)

    # ---------------------- output projection --------------------------
    # Flatten time steps, apply two dense layers (ReLU then linear) to map
    # 2*lstm_dim features down to num_tags logits per step.
    output = tf.reshape(output, [-1, 2 * lstm_dim])
    with tf.variable_scope('project_larer1'):
        w = tf.get_variable(
            name='w',
            shape=[2 * lstm_dim, lstm_dim],
            initializer=initializer
        )
        b = tf.get_variable(
            name='b',
            shape=[lstm_dim],
            initializer=tf.zeros_initializer()
        )
        output = tf.nn.relu(tf.matmul(output, w) + b)

    with tf.variable_scope('project_larer2'):
        w = tf.get_variable(
            name='w',
            shape=[lstm_dim, num_tags],
            initializer=initializer
        )
        b = tf.get_variable(
            name='b',
            shape=[num_tags],
            initializer=tf.zeros_initializer()
        )
        output = tf.matmul(output, w) + b

    # Restore the [batch, time, num_tags] shape.
    output = tf.reshape(output, [-1, num_time, num_tags])
    return output, lengths


class Model(object):
    """BiLSTM-CRF sequence-labelling model built on the TF1 compat graph API."""

    def __init__(self, dict, lr=0.0001):
        """
        Build the whole training graph: placeholders, forward pass, CRF loss,
        clipped-gradient Adam optimizer and a checkpoint saver.

        :param dict: feature dictionaries; for each key ('word', 'bound', 'flag',
                     'radical', 'pinyin', 'label') only len(dict[key][0]) is read,
                     i.e. the vocabulary size of that feature.
        :param lr: learning rate for the Adam optimizer.
        """
        # NOTE(review): the parameter name `dict` shadows the builtin; kept
        # unchanged so existing keyword callers (Model(dict=...)) still work.

        # --------- vocabulary sizes taken from the feature maps ---------
        self.num_char = len(dict['word'][0])
        self.num_bound = len(dict['bound'][0])
        self.num_flag = len(dict['flag'][0])
        self.num_radical = len(dict['radical'][0])
        self.num_pinyin = len(dict['pinyin'][0])
        self.num_tags = len(dict['label'][0])

        # --------- embedding / hidden dimensions ---------
        self.lstm_dim = 100
        self.pinyin_dim = 50
        self.radical_dim = 50
        self.flag_dim = 50
        self.bound_dim = 20
        self.char_dim = 100

        self.lr = lr

        # --------- placeholders receiving one batch of data ---------
        # All inputs are [batch, time] int32 id matrices.
        self.char_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='char_inputs')
        self.bound_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='bound_inputs')
        self.flag_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='flag_inputs')
        self.radical_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='radical_inputs')
        self.pinyin_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='pinyin_inputs')
        self.targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='targets')

        self.global_step = tf.Variable(0, trainable=False)

        # --------- forward pass ---------
        # logits: per-step emission scores; lengths: true sentence lengths.
        self.logits, self.lengths = self.get_logits(self.char_inputs,
                                                    self.bound_inputs,
                                                    self.flag_inputs,
                                                    self.radical_inputs,
                                                    self.pinyin_inputs
                                                    )
        # --------- CRF loss ---------
        self.cost = self.loss(self.logits, self.targets, self.lengths)

        # --------- optimization with gradient clipping ---------
        with tf.variable_scope('optimizer'):
            opt = tf.train.AdamOptimizer(self.lr)
            grad_vars = opt.compute_gradients(self.cost)  # gradients of all variables
            # BUG FIX: the original clipped to the empty range [-5, -5], which
            # forces every gradient to the constant -5; the intended clipping
            # range is [-5, 5]. Variables with no gradient (g is None) are
            # skipped so tf.clip_by_value never receives None.
            clip_grad_vars = [[tf.clip_by_value(g, -5, 5), v]
                              for g, v in grad_vars if g is not None]
            self.train_op = opt.apply_gradients(clip_grad_vars, self.global_step)

        # Checkpoint saver: keep only the 5 most recent checkpoints.
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)

    def get_logits(self, char, bound, flag, radical, pinyin):
        """
        Compute network output for one batch of feature id matrices.

        :param char: char ids, [batch, time]
        :param bound: word-boundary ids, [batch, time]
        :param flag: POS-flag ids, [batch, time]
        :param radical: radical ids, [batch, time]
        :param pinyin: pinyin ids, [batch, time]
        :return: (logits [batch, time, num_tags], true lengths [batch])
        """
        # Embedding-table shape per feature: [vocab_size, embed_dim].
        shapes = {}
        shapes['char'] = [self.num_char, self.char_dim]
        shapes['bound'] = [self.num_bound, self.bound_dim]
        shapes['flag'] = [self.num_flag, self.flag_dim]
        shapes['radical'] = [self.num_radical, self.radical_dim]
        shapes['pinyin'] = [self.num_pinyin, self.pinyin_dim]

        inputs = {}
        inputs['char'] = char
        inputs['bound'] = bound
        inputs['flag'] = flag
        inputs['radical'] = radical
        inputs['pinyin'] = pinyin

        return network(inputs, shapes, lstm_dim=self.lstm_dim, num_tags=self.num_tags)

    def loss(self, output, targets, lengths):
        """
        CRF negative log-likelihood, averaged over the batch.

        A virtual "start" time step carrying a dedicated start tag (index
        self.num_tags) is prepended to both logits and targets so the CRF can
        learn transition scores out of the start state.

        :param output: emission logits, [batch, time, num_tags]
        :param targets: gold tag ids, [batch, time]
        :param lengths: true sentence lengths, [batch]
        :return: scalar mean negative log-likelihood
        """
        b = tf.shape(lengths)[0]          # dynamic batch size
        num_steps = tf.shape(output)[1]   # dynamic sequence length
        with tf.variable_scope('crf_loss'):
            small = -1000.0
            # Start step: score `small` for every real tag, 0 for the start tag.
            # BUG FIX: the original passed the function object `tf.zeros` into
            # tf.concat instead of a tensor; it must be tf.zeros(shape=[b, 1, 1]).
            start_logits = tf.concat(
                [small * tf.ones(shape=[b, 1, self.num_tags]), tf.zeros(shape=[b, 1, 1])],
                axis=-1
            )
            # Real steps score `small` for the start tag so it is never predicted.
            pad_logits = tf.cast(small * tf.ones([b, num_steps, 1]), tf.float32)

            logits = tf.concat([output, pad_logits], axis=-1)   # [b, T, num_tags+1]
            # BUG FIX: the start step must be concatenated along the *time*
            # axis (1), not the tag axis (-1), giving [b, T+1, num_tags+1].
            logits = tf.concat([start_logits, logits], axis=1)
            # Prepend the start tag id (num_tags) to every target sequence.
            targets = tf.concat(
                [tf.cast(self.num_tags * tf.ones([b, 1]), tf.int32), targets],
                axis=-1
            )

        # Transition matrix over the extended tag set (real tags + start tag).
        self.trans = tf.get_variable(
            name='trans',
            shape=[self.num_tags + 1, self.num_tags + 1],
            initializer=tf.truncated_normal_initializer()
        )

        # tensorflow_addons replaces the removed tf.contrib.crf
        # (from tensorflow_addons.text.crf import crf_log_likelihood).
        log_likehood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=targets,
            transition_params=self.trans,
            # BUG FIX: +1 accounts for the prepended start step.
            sequence_lengths=lengths + 1
        )
        return tf.reduce_mean(-log_likehood)
