import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
from tensorflow.contrib.crf import crf_log_likelihood

def network(char, bound, flag, radical, pinyin, shapes, num_tags, lstm_dim=100,
            initializer=tf.truncated_normal_initializer()):
    """Build the tagging network: feature embeddings -> 2 stacked BiLSTMs -> projection.

    :param char: int tensor [batch, max_len], character ids (0 is assumed to be padding
                 — lengths are derived from non-zero entries; TODO confirm with the data pipeline)
    :param bound: int tensor [batch, max_len], word-boundary feature ids
    :param flag: int tensor [batch, max_len], POS-flag feature ids
    :param radical: int tensor [batch, max_len], radical feature ids
    :param pinyin: int tensor [batch, max_len], pinyin feature ids
    :param shapes: dict feature-name -> [vocab_size, embed_dim] for the five features above
    :param num_tags: number of output tags
    :param lstm_dim: hidden units per LSTM direction
    :param initializer: initializer for embedding and projection weights
    :return: (logits [batch, max_len, num_tags], lengths [batch])
    """
    # ---- Feature embeddings: one lookup table per feature, concatenated on the last axis.
    # (The original five copy-pasted scopes are folded into a loop; scope/variable
    # names are preserved: '<name>_embedding'.)
    embeddings = []
    for name, ids in (('char', char), ('bound', bound), ('flag', flag),
                      ('radical', radical), ('pinyin', pinyin)):
        with tf.variable_scope(name + '_embedding'):
            lookup = tf.get_variable(
                name=name + '_embedding',
                shape=shapes[name],
                initializer=initializer,
            )
            embeddings.append(tf.nn.embedding_lookup(lookup, ids))
    embed = tf.concat(embeddings, axis=-1)  # [b, L, sum of feature dims]

    # True sentence lengths from the char ids (padding id 0 -> 0, real char -> 1).
    # Original bug: this was computed from `keys[0]`, a *string* key of `shapes`,
    # which is not a tensor — `tf.abs('char')` cannot work.
    sign = tf.sign(tf.abs(char))
    lengths = tf.reduce_sum(sign, reduction_indices=1)
    num_steps = tf.shape(char)[1]  # max sequence length in this batch

    # ==============================BiLSTM encoder================
    def _bilstm(scope, inputs):
        # One bidirectional LSTM layer; returns [b, L, 2*lstm_dim].
        # Original bug: rnn.BasicLSTMCell has no `initializer` kwarg (TypeError);
        # rnn.LSTMCell accepts one.
        with tf.variable_scope(scope):
            cells = {}
            for direction in ('forward', 'backward'):
                with tf.variable_scope(direction):
                    cells[direction] = rnn.LSTMCell(lstm_dim, initializer=initializer)
            outputs, _final_states = tf.nn.bidirectional_dynamic_rnn(
                cells['forward'],
                cells['backward'],
                inputs,
                dtype=tf.float32,
                sequence_length=lengths,
            )
        return tf.concat(outputs, axis=-1)

    outputs_1 = _bilstm('BiLstm_layer1', embed)
    # Original bug: layer 2 re-read `embed`, so the first layer's output was unused.
    output = _bilstm('BiLstm_layer2', outputs_1)
    # (An early `return output` here in the original made the projection head below
    # unreachable; it has been removed.)

    # ==================projection to tag logits========================
    output = tf.reshape(output, [-1, 2 * lstm_dim])  # [b*L, 2*lstm_dim]

    # ---- First projection layer (ReLU) ----
    with tf.variable_scope('project_larer1'):
        w = tf.get_variable(
            name='w',
            shape=[2 * lstm_dim, lstm_dim],
            initializer=initializer,
        )
        b = tf.get_variable(
            name='b',
            shape=[lstm_dim],
            initializer=tf.zeros_initializer(),
        )
        output = tf.nn.relu(tf.matmul(output, w) + b)

    # ---- Second projection layer (linear, produces tag scores) ----
    with tf.variable_scope('project_larer2'):
        w = tf.get_variable(
            name='w',
            shape=[lstm_dim, num_tags],
            initializer=initializer,
        )
        b = tf.get_variable(
            name='b',
            shape=[num_tags],
            initializer=tf.zeros_initializer(),
        )
        output = tf.matmul(output, w) + b

    # Back to [b, max_len, num_tags]. Original referenced undefined `num_time`.
    output = tf.reshape(output, [-1, num_steps, num_tags])
    return output, lengths


class Model(object):
    """BiLSTM-CRF sequence tagger: wires the `network` logits to a CRF loss."""

    def __init__(self, dict):
        """
        :param dict: vocabulary mapping with keys 'word', 'bound', 'flag',
                     'radical', 'pinyin', 'label'; each value's element [0] is the
                     id lookup whose length gives the vocabulary size.
                     (Original bug: this parameter was missing, so every
                     `dict[...]` below hit the builtin `dict` type and failed.
                     The name shadows the builtin but is kept to match the
                     existing data-pipeline convention.)
        """
        # Vocabulary sizes, one per input feature plus the tag set.
        self.num_char = len(dict['word'][0])
        self.num_bound = len(dict['bound'][0])
        self.num_flag = len(dict['flag'][0])
        self.num_radical = len(dict['radical'][0])
        self.num_pinyin = len(dict['pinyin'][0])
        self.num_tags = len(dict['label'][0])
        # Embedding dimensions per feature, and the LSTM hidden size.
        self.char_dim = 100
        self.bound_dim = 20
        self.flag_dim = 50
        self.radical_dim = 50
        self.pinyin_dim = 50
        self.lstm_dim = 100

    def get_logits(self, char, bound, flag, radical, pinyin):
        """Compute network outputs for one batch of feature id tensors.

        :param char: int tensor [batch, max_len] of character ids
        :param bound: int tensor of word-boundary ids
        :param flag: int tensor of POS-flag ids
        :param radical: int tensor of radical ids
        :param pinyin: int tensor of pinyin ids
        :return: (logits [batch_size, max_length, num_tags], lengths [batch_size])
        """
        # Embedding table shapes: [vocab_size, embed_dim] per feature.
        # Original bug: flag/radical/pinyin used the vocab size for BOTH axes
        # (e.g. [num_flag, num_flag]) instead of the configured embed dims.
        shapes = {
            'char': [self.num_char, self.char_dim],
            'bound': [self.num_bound, self.bound_dim],
            'flag': [self.num_flag, self.flag_dim],
            'radical': [self.num_radical, self.radical_dim],
            'pinyin': [self.num_pinyin, self.pinyin_dim],
        }
        return network(char, bound, flag, radical, pinyin, shapes,
                       num_tags=self.num_tags, lstm_dim=self.lstm_dim)

    # ==============loss function ========
    def loss(self, output, targets, lengths):
        """CRF negative log-likelihood, averaged over the batch.

        A synthetic <start> step (an extra tag with index `num_tags`) is
        prepended to both logits and targets so the CRF learns which real
        tags may begin a sentence.

        :param output: logits [batch, max_len, num_tags]
        :param targets: int tensor [batch, max_len] of gold tag indices
        :param lengths: int tensor [batch] of true sequence lengths
        :return: scalar loss tensor
        """
        # Original bug: `len(lengths)` fails on a tensor; use the dynamic shape.
        b = tf.shape(lengths)[0]
        num_steps = tf.shape(output)[1]
        with tf.variable_scope('crf_loss'):
            small = -1000.0  # acts like log(0): effectively forbids a tag
            # Logits for the synthetic start step: every real tag is forbidden,
            # only the extra start tag (last column) is allowed.
            start_logits = tf.concat(
                [small * tf.ones(shape=[b, 1, self.num_tags]),
                 tf.zeros(shape=[b, 1, 1])],
                axis=-1,
            )
            # Pad every real step with a `small` score for the start-tag column.
            pad_logits = tf.cast(small * tf.ones([b, num_steps, 1]), tf.float32)
            logits = tf.concat([output, pad_logits], axis=-1)
            # Prepend the start step along the TIME axis.
            # Original bug: this concat used axis=-1 (the tag axis), producing a
            # [b, ?, 2*(num_tags+1)]-shaped tensor instead of adding a step.
            logits = tf.concat([start_logits, logits], axis=1)
            # Prepend the start-tag index to the gold sequence. CRF tag indices
            # must be integers; the original cast them to float32.
            targets = tf.concat(
                [tf.cast(self.num_tags * tf.ones([b, 1]), tf.int32), targets],
                axis=-1,
            )
            # Transition matrix over the augmented tag set (real tags + start).
            self.trans = tf.get_variable(
                name='trans',
                shape=[self.num_tags + 1, self.num_tags + 1],
                initializer=tf.truncated_normal_initializer(),
            )
            log_likelihood, self.trans = crf_log_likelihood(
                inputs=logits,
                tag_indices=targets,
                transition_params=self.trans,
                # +1 because every sequence now carries the prepended start step.
                sequence_lengths=lengths + 1,
            )
            return tf.reduce_mean(-log_likelihood)
