# -*- coding: utf-8 -*-
# @Time    : 2019/5/29 21:59
# @Author  : DrMa

import tensorflow as tf
import numpy as np
from sklearn import metrics
from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple  # used to build the AttentionWrapper's initial LSTM state


def My_func_for_acc_count(logits,labels,threshold_value,num_label):
    '''
    Compute micro-averaged precision / recall / F1 for multi-label predictions.

    :param logits: predicted probability distribution, shape [batch, num_label]
    :param labels: ground-truth labels in one-hot (multi-hot) form
    :param threshold_value: probability threshold above which a label is predicted
    :param num_label: total number of labels (width of each one-hot row)
    :return: (micro_precision, micro_recall, micro_f1) as np.float32 scalars
    '''
    # Reuse the shared binarization helper instead of duplicating its logic:
    # every label with probability >= threshold is predicted; a row with no
    # label above the threshold falls back to its single argmax label.
    logits_one_hot = My_func_for_one_hot_logits(logits, threshold_value, num_label)
    micro_precision = np.asarray(metrics.precision_score(labels, logits_one_hot, average='micro'), dtype=np.float32)
    micro_recall = np.asarray(metrics.recall_score(labels, logits_one_hot, average='micro'), dtype=np.float32)
    micro_f1 = np.asarray(metrics.f1_score(labels, logits_one_hot, average='micro'), dtype=np.float32)
    return micro_precision, micro_recall, micro_f1
def precision_recall(logits,labels,threshold_value,num_label):
    """Graph-side wrapper: evaluate the numpy metric function via tf.py_func.

    Returns three scalar float32 tensors: micro precision, recall and F1.
    """
    metric_ops = tf.py_func(My_func_for_acc_count,
                            [logits, labels, threshold_value, num_label],
                            [tf.float32, tf.float32, tf.float32])
    precision_op, recall_op, f1_op = metric_ops
    return precision_op, recall_op, f1_op

def My_func_for_one_hot_logits(logits,threshold_value,num_label):
    """Binarize probability rows into multi-hot float32 vectors.

    Each label whose probability is >= threshold_value is set to 1.0; when a
    row has no label above the threshold, its single argmax label is used.
    Returns an array of shape [len(logits), num_label].
    """
    def _binarize(probs):
        # One row: threshold first, fall back to argmax if nothing passes.
        hot = np.zeros(num_label, dtype=np.float32)
        picked = np.argwhere(probs >= threshold_value)
        if len(picked) == 0:
            picked = np.argmax(probs)
        hot[picked] = 1.0
        return hot

    return np.asarray([_binarize(row) for row in logits], dtype=np.float32)
def get_one_hot_logits(logits,threshold_value,num_label):
    """Graph-side wrapper: binarize logits via tf.py_func.

    NOTE: tf.py_func called with a list of output dtypes returns a list, so
    the result is a one-element list containing the float32 tensor (this
    matches the original behavior).
    """
    return tf.py_func(My_func_for_one_hot_logits,
                      [logits, threshold_value, num_label],
                      [tf.float32])

class Our_model_CAIL(object):
    """Hierarchical-attention encoder + attentional LSTM decoder for the CAIL dataset.

    The encoder is a HAN-style word-level / sentence-level bidirectional RNN
    with attention pooling; the decoder generates a label sequence with Luong
    attention over the sentence-level encoder states. Training uses teacher
    forcing; prediction decodes greedily inside a tf.while_loop.
    """
    def __init__(self, embedding_table,model_config,Train_or_predict="Train"):
        """Store config, create placeholders, and build the train or predict graph.

        :param embedding_table: pretrained word-embedding matrix (kept frozen)
        :param model_config: configuration object holding all hyper-parameters
        :param Train_or_predict: "Train" builds the training graph, anything
            else builds the greedy-decoding prediction graph
        """
        self.batch_size=model_config.batch_size
        self.num_label=model_config.num_label# label count + special tokens (106+2); at prediction time 107 labels are used, i.e. the start label is filtered out
        self.doc_len=model_config.doc_len
        self.num_sentence =model_config.num_sentence# number of sentences per document
        self.learning_rate=model_config.learning_rate
        self.rnn_type=model_config.rnn_type

        self.hidden_size=model_config.hidden_size
        self.num_layers=model_config.num_layers
        self.dropout_keep_prob=model_config.dropout_keep_prob
        self.embedding_table = embedding_table
        self.embedding_dim = model_config.embedding_dim
        self.embedding_dim_label= model_config.embedding_dim_label
        self.threshold = model_config.threshold_value


        # Graph inputs: token ids of the document and the per-sample text length.
        self.input_x = tf.placeholder(tf.int32, [self.batch_size, self.doc_len], name='input_x')
        self.text_len = tf.placeholder(shape=[self.batch_size], dtype=tf.int32)

        # Decoder label sequences: src is the decoder input, tgt is the target.
        self.batch_label_src = tf.placeholder(shape=[self.batch_size, None],dtype=tf.int32, name='src_label')
        self.batch_label_tgt = tf.placeholder(shape=[self.batch_size, None], dtype=tf.int32, name='tgt_label')
        self.label_len = tf.placeholder(shape=[self.batch_size], dtype=tf.int32)

        self.sos_label_id = model_config.sos_label_id
        self.eos_label_id = model_config.eos_label_id
        self.max_label_num = model_config.max_label_num
        if Train_or_predict=='Train':
            self.train_model()
        else:
            self.predict_model()


    def rnn_cell(self):
        """Create a single RNN cell; rnn_type selects vanilla RNN, LSTM or GRU."""
        if self.rnn_type == "vanilla":
            return tf.nn.rnn_cell.BasicRNNCell(self.hidden_size)
        elif self.rnn_type == "lstm":
            return tf.nn.rnn_cell.BasicLSTMCell(self.hidden_size)
        elif self.rnn_type == "gru":
            return tf.nn.rnn_cell.GRUCell(self.hidden_size)
        else:
            raise Exception("rnn_type must be vanilla、lstm or gru!")
    def bidirectional_rnn(self, inputs, name):
        """Run a dropout-wrapped bidirectional RNN over inputs.

        :return: (output_fw, output_bw) — per-timestep forward and backward outputs.
        """
        with tf.variable_scope(name):
            fw_cell = self.rnn_cell()
            fw_cell = tf.nn.rnn_cell.DropoutWrapper(fw_cell, output_keep_prob=self.dropout_keep_prob)
            bw_cell = self.rnn_cell()
            bw_cell = tf.nn.rnn_cell.DropoutWrapper(bw_cell, output_keep_prob=self.dropout_keep_prob)
            (output_fw, output_bw), states = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_cell,cell_bw=bw_cell,
                                                                             inputs=inputs,dtype=tf.float32)
        return output_fw, output_bw
    def attention(self, inputs, name):
        """Attention pooling over the time axis of `inputs`; returns one vector per row."""
        with tf.variable_scope(name):
            # Score each timestep: dense + tanh projection against a learned context vector.
            hidden_vec = tf.layers.dense(inputs, self.hidden_size * 2, activation=tf.nn.tanh, name='w_hidden')

            u_context = tf.Variable(tf.truncated_normal([self.hidden_size * 2]), name='u_context')

            alpha = tf.nn.softmax(tf.reduce_sum(tf.multiply(hidden_vec, u_context),
                                                axis=2,
                                                keep_dims=True),
                                  dim=1)
            # alpha      = [None*num_sentences, sentence_len, 1]
            # hidden_vec = [None*num_sentences, sentence_len, 2*hidden_dim]
            # u_context  = [2*hidden_dim], a learned guide vector
            # hidden_vec * u_context, reduce_sum over the last axis gives
            # [None*num_sentences, sentence_len, 1]; softmax is over the time axis.
            # Weighted sum of the hidden states:

            attention_output = tf.reduce_sum(tf.multiply(inputs, alpha), axis=1)

        return attention_output

    def model_encoder(self):
        """Hierarchical encoder: words -> sentence vectors -> document vector.

        :return: (h_drop, sentence_hidden_state) — document summary and
            per-sentence states for the decoder's attention.
        """

        #encoder:
        with tf.device("/cpu:0"), tf.name_scope("embedding_scope"):  # keep the large embedding lookup on CPU
            embedding = tf.Variable(self.embedding_table, trainable=False)#[1469766,100]
            input_x = tf.split(self.input_x, self.num_sentence, axis=1)
            input_x = tf.stack(input_x, axis=1)
            embedding_inputs = tf.nn.embedding_lookup(embedding, input_x)
            sentence_len = int(self.doc_len / self.num_sentence)
            embedding_inputs_reshaped = tf.reshape(embedding_inputs,
                                                   shape=[-1, sentence_len, self.embedding_dim])

            # [batch_size*num_sentence,sentence_len,embed_size]
        with tf.name_scope("word_encoder"):
            (output_fw, output_bw) = self.bidirectional_rnn(embedding_inputs_reshaped, "word_encoder")
            word_hidden_state = tf.concat((output_fw, output_bw), 2)
            # [batch_size*num_sentences,sentence_len,hidden_size * 2]
        # word level
        with tf.name_scope("word_attention"):
            sentence_vec = self.attention(word_hidden_state, "word_attention")
            # [batch_size*num_sentences, hidden_size * 2]
        # sentence level
        with tf.name_scope("sentence_encoder"):
            sentence_vec = tf.reshape(sentence_vec, shape=[-1, self.num_sentence, self.hidden_size * 2])
            # [batch_size,num_sentences,hidden_size*2]
            output_fw, output_bw = self.bidirectional_rnn(sentence_vec, "sentence_encoder")
            sentence_hidden_state = tf.concat((output_fw, output_bw), 2)
            # [batch_size, num_sentences, hidden_size * 2]
            # These per-sentence states feed the decoder's attention.

        with tf.name_scope('sentence_attention'):
            doc_vec = self.attention(sentence_hidden_state, "sentence_attention")
            # [batch_size, hidden_size * 2]
        with tf.name_scope('dropout'):
            h_drop = tf.nn.dropout(doc_vec, self.dropout_keep_prob)
            # Used as the decoder's initial state, [batch_size, hidden_size * 2]

        return h_drop,sentence_hidden_state
        # h_drop: encoder summary, [batch_size, hidden_size * 2]
        # sentence_hidden_state: time-stepped text memory the decoder attends to
    def model_decoder(self,enc_out,enc_att_out):
        """Attentional LSTM decoder over label embeddings (teacher forcing).

        :param enc_out: encoder summary used as the decoder's initial state
        :param enc_att_out: per-sentence encoder states (attention memory)
        :return: (logits, attention_cell) — flat per-step logits and the cell
            (reused by predict_model for step-wise decoding)
        """

        with tf.name_scope('dec_init'):
            self.dec_cell = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.LSTMCell(self.embedding_dim_label) for _ in range(self.num_layers)])

            self.w_dec = tf.get_variable('w_dec', [self.embedding_dim_label, self.num_label-1])
            self.b_dec = tf.get_variable('b_dec', [self.num_label-1])  # actually predicts num_label-1 classes: e.g. 0-53 plus 54=<eos>

        with tf.name_scope('dec_label_embed'):
            self.embed_table_lb = tf.get_variable(name='embed_table_lb',
                                             shape=[self.num_label, self.embedding_dim_label],
                                             dtype=tf.float32)
            embed_label = tf.nn.embedding_lookup(self.embed_table_lb, self.batch_label_src)
            embed_label = tf.nn.dropout(embed_label, self.dropout_keep_prob)
            # shape= [batch,None,embed_size]

        with tf.name_scope('dec_attention'):
            attention_mechanism = tf.contrib.seq2seq.LuongAttention(self.embedding_dim_label,
                                                                    enc_att_out,
                                                                    memory_sequence_length=self.text_len)
            attention_cell = tf.contrib.seq2seq.AttentionWrapper(self.dec_cell,
                                                                 attention_mechanism,
                                                                 attention_layer_size=self.embedding_dim_label,
                                                                 alignment_history=False)
            init_enc = enc_out  # [batch_size, hidden_size * 2],[32, 100]
            c_state = init_enc
            h_state = init_enc

            lstm_tuple = LSTMStateTuple(c_state, h_state)
            init_state = attention_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size)

            # NOTE(review): the cell_state clone hard-codes two stacked layers
            # (lstm_tuple, lstm_tuple) — this assumes self.num_layers == 2; confirm with config.
            init_state_ = init_state.clone(cell_state=(lstm_tuple, lstm_tuple),
                                           attention=init_enc,
                                           time=0,
                                           alignments=tf.zeros([self.batch_size, tf.shape(enc_att_out)[1]]),
                                           alignment_history=(),
                                           attention_state=tf.zeros(
                                               [self.batch_size, tf.shape(enc_att_out)[1]]))
            # Unroll the decoder over the teacher-forced label sequence.
            dec_att, attention_state = tf.nn.dynamic_rnn(attention_cell, embed_label, self.label_len,
                                                         dtype=tf.float32, initial_state=init_state_)

            output = tf.reshape(dec_att, [-1, self.embedding_dim_label])  # [batch_size*None,embedding_dim_label]

            logits = tf.matmul(output, self.w_dec) + self.b_dec
            # [batch_size*None, num_label-1]
            return logits, attention_cell

    def train_model(self):
        """Training graph: masked sequence cross-entropy, Adam optimizer."""

        enc_out, enc_att_out=self.model_encoder()
        logits,_=self.model_decoder(enc_out, enc_att_out)

        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(self.batch_label_tgt, [-1]),
                                                              logits=logits)
        # [batch_size*max_label_len]  (max label count within the batch)

        # Mask out loss from padded label positions beyond each sample's label_len.
        label_weights = tf.sequence_mask(self.label_len, maxlen=tf.shape(self.batch_label_src)[1], dtype=tf.float32)
        # [batch_size,max_label_len]
        label_weights = tf.reshape(label_weights, [-1])
        # [batch_size*max_label_len]

        cost = tf.reduce_sum(loss * label_weights)  # sum of label losses over the whole batch (grows with batch size)

        self.cost_per_label = cost / tf.reduce_sum(label_weights)  # average loss per label position
        self.cost_per_sample = cost / tf.to_float(self.batch_size)  # average loss per sample

        self.cost_summary = tf.summary.scalar('cost', self.cost_per_sample)

        self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost_per_sample)

    def predict_model(self):
        """Inference graph: greedy decoding in a tf.while_loop until <eos> or max length."""
        enc_out, enc_att_out = self.model_encoder()
        _, attention_cell = self.model_decoder(enc_out, enc_att_out)

        init_enc = enc_out  # [batch_size, hidden_size * 2],[32, 100]
        c_state = init_enc
        h_state = init_enc
        lstm_tuple = LSTMStateTuple(c_state, h_state)
        init_state = attention_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size)

        # Same initial-state construction as model_decoder (two-layer cell_state).
        init_state_ = init_state.clone(cell_state=(lstm_tuple, lstm_tuple),
                                       attention=init_enc,
                                       time=0,
                                       alignments=tf.zeros([self.batch_size, tf.shape(enc_att_out)[1]]),
                                       alignment_history=(),
                                       attention_state=tf.zeros(
                                           [self.batch_size, tf.shape(enc_att_out)[1]]))
        # Dynamically-sized array of generated label ids, seeded with <sos>.
        init_array = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True, clear_after_read=False)
        init_array = init_array.write(0, self.sos_label_id)
        init_loop_var = (init_state_, init_array, 0)

        def continue_loop_condition(state, trg_ids, step):
            # Continue while the last emitted id is not <eos> and we are under the length cap.
            return tf.reduce_all(
                tf.logical_and(tf.not_equal(trg_ids.read(step), self.eos_label_id),
                               tf.less(step, self.max_label_num)))

        def loop_body(state, trg_ids, step):
            # NOTE(review): trg_input has leading dimension 1, so this decode loop
            # appears to assume batch_size == 1 at prediction time — confirm.
            trg_input = [trg_ids.read(step)]  # [1]
            trg_emb = tf.nn.embedding_lookup(self.embed_table_lb, trg_input)  # [1,embed_size_lb]
            # One decoder step: feed the previous label's embedding, get the next state.
            dec_outputs, next_state = attention_cell.call(state=state, inputs=trg_emb)

            logits = tf.matmul(dec_outputs, self.w_dec)+self.b_dec  # same projection as the training decoder
            next_id = tf.argmax(logits, axis=1, output_type=tf.int32)
            trg_ids = trg_ids.write(step + 1, next_id[0])
            return next_state, trg_ids, step + 1

        state, trg_ids, step = tf.while_loop(continue_loop_condition, loop_body, init_loop_var)

        # Predicted label-id sequence, starting with <sos>.
        self.label_pre = trg_ids.stack()

class Our_model_CJO(object):
    """Fact + law dual hierarchical encoder with an attentional LSTM label decoder (CJO dataset).

    Two HAN-style encoders (one over the case facts, one over the law text)
    are fused by a linear projection into the decoder's initial state; the
    decoder attends over the fact sentence states and generates a label
    sequence (teacher forcing at train time, greedy decoding at predict time).
    """
    def __init__(self, embedding_table, model_config, Train_or_predict="Train"):
        """Store config, create placeholders, and build the train or predict graph.

        :param embedding_table: pretrained word-embedding matrix (kept frozen)
        :param model_config: configuration object holding all hyper-parameters
        :param Train_or_predict: "Train" builds the training graph, anything
            else builds the greedy-decoding prediction graph
        """

        self.batch_size=model_config.batch_size
        self.num_label=model_config.num_label# label count + special tokens (106+2); at prediction time we use 106+2-1 labels, i.e. the start label is filtered out

        self.doc_len=model_config.len_doc
        self.law_len=model_config.len_law

        self.num_sentence_fact =model_config.num_sentence_fact# number of fact sentences
        self.num_sentence_law= model_config.num_sentence_law# number of law sentences

        self.learning_rate=model_config.learning_rate
        self.rnn_type=model_config.rnn_type
        self.hidden_size=model_config.hidden_size
        self.num_layers=model_config.num_layers

        self.dropout_keep_prob=model_config.dropout_keep_prob

        self.embedding_table = embedding_table

        self.embedding_dim_word = model_config.embedding_dim_word
        self.embedding_dim_label= model_config.embedding_dim_label

        # Graph inputs: fact text and law text token ids.
        self.input_fact = tf.placeholder(tf.int32, [self.batch_size, self.doc_len], name='input_fact')
        self.input_law=tf.placeholder(tf.int32, [self.batch_size, self.law_len], name='input_law')

        self.txt_len_att = tf.placeholder(shape=[self.batch_size], dtype=tf.int32)# fed to the attention cell; effectively the number of sentences
        self.law_len_att = tf.placeholder(shape=[self.batch_size], dtype=tf.int32)

        # Decoder label sequences: src is the decoder input, tgt is the target.
        self.batch_label_src = tf.placeholder(shape=[self.batch_size, None],dtype=tf.int32, name='src_label')
        self.batch_label_tgt = tf.placeholder(shape=[self.batch_size, None], dtype=tf.int32, name='tgt_label')
        self.label_len = tf.placeholder(shape=[self.batch_size], dtype=tf.int32)

        self.sos_label_id = model_config.sos_label_id
        self.eos_label_id = model_config.eos_label_id
        self.max_label_num = model_config.max_label_num
        if Train_or_predict=='Train':
            self.train_model()
        else:
            self.predict_model()

    def rnn_cell(self):
        """Create a single RNN cell; rnn_type selects vanilla RNN, LSTM or GRU."""
        if self.rnn_type == "vanilla":
            return tf.nn.rnn_cell.BasicRNNCell(self.hidden_size)
        elif self.rnn_type == "lstm":
            return tf.nn.rnn_cell.BasicLSTMCell(self.hidden_size)
        elif self.rnn_type == "gru":
            return tf.nn.rnn_cell.GRUCell(self.hidden_size)
        else:
            raise Exception("rnn_type must be vanilla、lstm or gru!")
    def bidirectional_rnn(self, inputs, name):
        """Run a dropout-wrapped bidirectional RNN over inputs.

        :return: (output_fw, output_bw) — per-timestep forward and backward outputs.
        """
        with tf.variable_scope(name):
            fw_cell = self.rnn_cell()
            fw_cell = tf.nn.rnn_cell.DropoutWrapper(fw_cell, output_keep_prob=self.dropout_keep_prob)
            bw_cell = self.rnn_cell()
            bw_cell = tf.nn.rnn_cell.DropoutWrapper(bw_cell, output_keep_prob=self.dropout_keep_prob)
            (output_fw, output_bw), states = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_cell,cell_bw=bw_cell,
                                                                             inputs=inputs,dtype=tf.float32)
        return output_fw, output_bw
    def attention(self, inputs, name):
        """Attention pooling over the time axis of `inputs`; returns one vector per row."""
        with tf.variable_scope(name):
            # Score each timestep: dense + tanh projection against a learned context vector.
            hidden_vec = tf.layers.dense(inputs, self.hidden_size * 2, activation=tf.nn.tanh, name='w_hidden')

            u_context = tf.Variable(tf.truncated_normal([self.hidden_size * 2]), name='u_context')

            alpha = tf.nn.softmax(tf.reduce_sum(tf.multiply(hidden_vec, u_context),
                                                axis=2,
                                                keep_dims=True),
                                  dim=1)
            # alpha      = [None*num_sentences, sentence_len, 1]
            # hidden_vec = [None*num_sentences, sentence_len, 2*hidden_dim]
            # u_context  = [2*hidden_dim], a learned guide vector
            # hidden_vec * u_context, reduce_sum over the last axis gives
            # [None*num_sentences, sentence_len, 1]; softmax is over the time axis.
            # Weighted sum of the hidden states:

            attention_output = tf.reduce_sum(tf.multiply(inputs, alpha), axis=1)

        return attention_output
    def get_embedding_table(self):
        """Create the frozen word-embedding variable on CPU (shared by both encoders)."""
        with tf.device("/cpu:0"), tf.name_scope("get_embedding_talbe"):  # keep the large embedding variable on CPU (scope-name typo kept: graph names are load-bearing)
            embedding = tf.Variable(self.embedding_table, trainable=False)#[1469766,100]
        return embedding
    def model_encoder_fact(self,embedding):
        """HAN encoder over the case-fact text.

        :return: (h_drop, sentence_hidden_state) — fact document summary and
            per-sentence states for the decoder's attention.
        """
        #encoder:
        with tf.device("/cpu:0"), tf.name_scope("embedding_scope"):  # keep the embedding lookup on CPU
            input_x = tf.split(self.input_fact, self.num_sentence_fact, axis=1)
            input_x = tf.stack(input_x, axis=1)
            embedding_inputs = tf.nn.embedding_lookup(embedding, input_x)
            sentence_len = int(self.doc_len / self.num_sentence_fact)
            embedding_inputs_reshaped = tf.reshape(embedding_inputs,
                                                   shape=[-1, sentence_len, self.embedding_dim_word])
            # [batch_size*num_sentence,sentence_len,embed_size]
        with tf.name_scope("word_encoder"):
            (output_fw, output_bw) = self.bidirectional_rnn(embedding_inputs_reshaped, "word_encoder")
            word_hidden_state = tf.concat((output_fw, output_bw), 2)
            # [batch_size*num_sentences,sentence_len,hidden_size * 2]
        # word level
        with tf.name_scope("word_attention"):
            sentence_vec = self.attention(word_hidden_state, "word_attention")
            # [batch_size*num_sentences, hidden_size * 2]
        # sentence level
        with tf.name_scope("sentence_encoder"):
            sentence_vec = tf.reshape(sentence_vec, shape=[-1, self.num_sentence_fact, self.hidden_size * 2])
            # [batch_size,num_sentences,hidden_size*2]
            output_fw, output_bw = self.bidirectional_rnn(sentence_vec, "sentence_encoder")
            sentence_hidden_state = tf.concat((output_fw, output_bw), 2)
            # [batch_size, num_sentences, hidden_size * 2]
            # These per-sentence states feed the decoder's attention.

        with tf.name_scope('sentence_attention'):
            doc_vec = self.attention(sentence_hidden_state, "sentence_attention")
            # [batch_size, hidden_size * 2]
        with tf.name_scope('dropout'):
            h_drop = tf.nn.dropout(doc_vec, self.dropout_keep_prob)
            # Used as part of the decoder's initial state, [batch_size, hidden_size * 2]

        return h_drop, sentence_hidden_state
        # h_drop: fact document summary, [batch_size, hidden_size * 2]
        # sentence_hidden_state: time-stepped fact memory the decoder attends to
    def model_encoder_law(self,embedding):
        """HAN encoder over the law text (same structure as the fact encoder, separate scopes).

        :return: (h_drop, sentence_hidden_state) — law document summary and
            per-sentence states.
        """
        # encoder:
        with tf.device("/cpu:0"), tf.name_scope("embedding_scope_law"):  # keep the embedding lookup on CPU
            input_x = tf.split(self.input_law, self.num_sentence_law, axis=1)
            input_x = tf.stack(input_x, axis=1)
            embedding_inputs = tf.nn.embedding_lookup(embedding, input_x)
            sentence_len = int(self.law_len / self.num_sentence_law)
            embedding_inputs_reshaped = tf.reshape(embedding_inputs,
                                                   shape=[-1, sentence_len, self.embedding_dim_word])
            # [batch_size*num_sentence,sentence_len,embed_size]

        with tf.name_scope("word_encoder_law"):
            (output_fw, output_bw) = self.bidirectional_rnn(embedding_inputs_reshaped, "word_encoder_law")
            word_hidden_state = tf.concat((output_fw, output_bw), 2)
            # [batch_size*num_sentences,sentence_len,hidden_size * 2]
        # word level
        with tf.name_scope("word_attention_law"):
            sentence_vec = self.attention(word_hidden_state, "word_attention_law")
            # [batch_size*num_sentences, hidden_size * 2]
        # sentence level
        with tf.name_scope("sentence_encoder_law"):
            sentence_vec = tf.reshape(sentence_vec, shape=[-1, self.num_sentence_law, self.hidden_size * 2])
            # [batch_size,num_sentences,hidden_size*2]
            output_fw, output_bw = self.bidirectional_rnn(sentence_vec, "sentence_encoder_law")
            sentence_hidden_state = tf.concat((output_fw, output_bw), 2)
            # [batch_size, num_sentences, hidden_size * 2]
            # These per-sentence states could feed the decoder's attention.
        with tf.name_scope('sentence_attention_law'):
            doc_vec = self.attention(sentence_hidden_state, "sentence_attention_law")
            # [batch_size, hidden_size * 2]
        with tf.name_scope('dropout_law'):
            h_drop = tf.nn.dropout(doc_vec, self.dropout_keep_prob)
            # Used as part of the decoder's initial state, [batch_size, hidden_size * 2]
        return h_drop, sentence_hidden_state

    def model_decoder(self,enc_fact,enc_fact_att,enc_law):
        """Attentional LSTM decoder; the initial state fuses the fact and law document vectors.

        :param enc_fact: fact document summary
        :param enc_fact_att: per-sentence fact states (attention memory)
        :param enc_law: law document summary
        :return: (logits, attention_cell) — flat per-step logits and the cell
            (reused by predict_model for step-wise decoding)
        """
        with tf.name_scope('dec_init'):
            self.dec_cell = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.LSTMCell(self.embedding_dim_label) for _ in range(self.num_layers)])

            self.w_dec = tf.get_variable('w_dec', [self.embedding_dim_label, self.num_label-1])
            self.b_dec = tf.get_variable('b_dec', [self.num_label-1])
            # w_dec2/b_dec2 project the concatenated [fact; law] vector (hidden*4) down to hidden*2.
            self.w_dec2= tf.get_variable('w_dec2', [self.hidden_size*4, self.hidden_size*2])
            self.b_dec2 = tf.get_variable('b_dec2', [self.hidden_size * 2])


        with tf.name_scope('dec_label_embed'):
            self.embed_table_lb = tf.get_variable(name='embed_table_lb',
                                             shape=[self.num_label, self.embedding_dim_label],
                                             dtype=tf.float32)
            embed_label = tf.nn.embedding_lookup(self.embed_table_lb, self.batch_label_src)
            embed_label = tf.nn.dropout(embed_label, self.dropout_keep_prob)
            # shape= [batch,None,embed_size]
        with tf.name_scope('dec_attention'):

            attention_mechanism = tf.contrib.seq2seq.LuongAttention(self.embedding_dim_label,
                                                                    enc_fact_att,
                                                                    memory_sequence_length=self.txt_len_att)
            attention_cell = tf.contrib.seq2seq.AttentionWrapper(self.dec_cell,
                                                                 attention_mechanism,
                                                                 attention_layer_size=self.embedding_dim_label,
                                                                 alignment_history=False)
            init_enc = tf.concat([enc_fact,enc_law],axis=-1)  # [batch_size, hidden_size * 2*2],[32, 200]

            init_enc=tf.nn.xw_plus_b(init_enc,self.w_dec2, self.b_dec2)#[32, 100]


            c_state = init_enc
            h_state = init_enc


            lstm_tuple = LSTMStateTuple(c_state, h_state)
            init_state = attention_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size)

            # NOTE(review): the cell_state clone hard-codes two stacked layers
            # (lstm_tuple, lstm_tuple) — this assumes self.num_layers == 2; confirm with config.
            init_state_ = init_state.clone(cell_state=(lstm_tuple, lstm_tuple),
                                           attention=init_enc,
                                           time=0,
                                           alignments=tf.zeros([self.batch_size, tf.shape(enc_fact_att)[1]]),
                                           alignment_history=(),
                                           attention_state=tf.zeros(
                                               [self.batch_size, tf.shape(enc_fact_att)[1]]))
            # Unroll the decoder over the teacher-forced label sequence.
            dec_att, attention_state = tf.nn.dynamic_rnn(attention_cell, embed_label, self.label_len,
                                                         dtype=tf.float32, initial_state=init_state_)

            output = tf.reshape(dec_att, [-1, self.embedding_dim_label])  # [batch_size*None,embedding_dim_label]

            logits = tf.matmul(output, self.w_dec) + self.b_dec
            # [batch_size*None, num_label-1]
            return logits, attention_cell

    def train_model(self):
        """Training graph: masked sequence cross-entropy, Adam optimizer."""

        embedding=self.get_embedding_table()
        enc_out_fact, enc_att_out_fact=self.model_encoder_fact(embedding)
        enc_out_law, enc_att_out_law=self.model_encoder_law(embedding)
        logits,_=self.model_decoder(enc_out_fact, enc_att_out_fact, enc_out_law)

        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(self.batch_label_tgt, [-1]),
                                                              logits=logits)
        # [batch_size*max_label_len]  (max label count within the batch)

        # Mask out loss from padded label positions beyond each sample's label_len.
        label_weights = tf.sequence_mask(self.label_len, maxlen=tf.shape(self.batch_label_src)[1], dtype=tf.float32)
        # [batch_size,max_label_len]
        label_weights = tf.reshape(label_weights, [-1])
        # [batch_size*max_label_len]

        cost = tf.reduce_sum(loss * label_weights)  # sum of label losses over the whole batch (grows with batch size)

        self.cost_per_label = cost / tf.reduce_sum(label_weights)  # average loss per label position
        self.cost_per_sample = cost / tf.to_float(self.batch_size)  # average loss per sample

        self.cost_summary = tf.summary.scalar('cost', self.cost_per_sample)

        self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost_per_sample)

    def predict_model(self):
        """Inference graph: greedy decoding in a tf.while_loop until <eos> or max length."""
        embedding = self.get_embedding_table()
        enc_out_fact, enc_att_out_fact = self.model_encoder_fact(embedding)
        enc_out_law, enc_att_out_law = self.model_encoder_law(embedding)
        _, attention_cell = self.model_decoder(enc_out_fact, enc_att_out_fact, enc_out_law)

        init_enc = tf.concat([enc_out_fact,enc_out_law], axis=-1)  # [batch_size, hidden_size * 4]
        init_enc=tf.nn.xw_plus_b(init_enc, self.w_dec2, self.b_dec2)
        c_state = init_enc
        h_state = init_enc
        lstm_tuple = LSTMStateTuple(c_state, h_state)
        init_state = attention_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size)

        # Same initial-state construction as model_decoder (two-layer cell_state).
        init_state_ = init_state.clone(cell_state=(lstm_tuple, lstm_tuple),
                                       attention=init_enc,
                                       time=0,
                                       alignments=tf.zeros([self.batch_size, tf.shape(enc_att_out_fact)[1]]),
                                       alignment_history=(),
                                       attention_state=tf.zeros(
                                           [self.batch_size, tf.shape(enc_att_out_fact)[1]]))
        # Dynamically-sized array of generated label ids, seeded with <sos>.
        init_array = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True, clear_after_read=False)
        init_array = init_array.write(0, self.sos_label_id)
        init_loop_var = (init_state_, init_array, 0)

        def continue_loop_condition(state, trg_ids, step):
            # Continue while the last emitted id is not <eos> and we are under the length cap.
            return tf.reduce_all(
                tf.logical_and(tf.not_equal(trg_ids.read(step), self.eos_label_id),
                               tf.less(step, self.max_label_num)))

        def loop_body(state, trg_ids, step):
            # NOTE(review): trg_input has leading dimension 1, so this decode loop
            # appears to assume batch_size == 1 at prediction time — confirm.
            trg_input = [trg_ids.read(step)]  # [1]
            trg_emb = tf.nn.embedding_lookup(self.embed_table_lb, trg_input)  # [1,embed_size_lb]
            # One decoder step: feed the previous label's embedding, get the next state.
            dec_outputs, next_state = attention_cell.call(state=state, inputs=trg_emb)

            logits = tf.matmul(dec_outputs, self.w_dec)+self.b_dec  # same projection as the training decoder
            next_id = tf.argmax(logits, axis=1, output_type=tf.int32)
            trg_ids = trg_ids.write(step + 1, next_id[0])
            return next_state, trg_ids, step + 1

        state, trg_ids, step = tf.while_loop(continue_loop_condition, loop_body, init_loop_var)

        # Predicted label-id sequence, starting with <sos>.
        self.label_pre = trg_ids.stack()

class Our_model_wo_law_CJO(object):
    """Hierarchical attention encoder + attentional LSTM decoder predicting a
    label sequence from case facts alone.

    "wo" = "without": unlike the sibling model earlier in this file, this
    variant has no law-article encoder branch. Depending on
    ``Train_or_predict`` the constructor builds either the teacher-forced
    training graph or the greedy step-by-step prediction graph.
    """
    # wo denotes without (no law-article input branch)
    def __init__(self, embedding_table, model_config, Train_or_predict="Train"):# "Train" -> training graph; anything else -> prediction graph
        """Store hyper-parameters, create input placeholders, build the graph.

        :param embedding_table: pre-trained word-embedding matrix, wrapped in a
            non-trainable Variable by get_embedding_table().
        :param model_config: configuration object; the attribute assignments
            below list exactly the fields that are read.
        :param Train_or_predict: "Train" builds train_model(), otherwise
            predict_model() is built.
        """
        self.batch_size=model_config.batch_size
        self.num_label=model_config.num_label#number of labels + special tokens (106+2); at prediction the output size is 106+2-1, i.e. the start (SOS) label is filtered out

        self.doc_len=model_config.len_doc  # document length in tokens; split into num_sentence_fact equal sentences in model_encoder_fact

        self.num_sentence_fact =model_config.num_sentence_fact#number of sentences per fact document

        self.learning_rate=model_config.learning_rate
        self.rnn_type=model_config.rnn_type  # "vanilla" | "lstm" | "gru"; see rnn_cell()
        self.hidden_size=model_config.hidden_size
        self.num_layers=model_config.num_layers  # number of stacked LSTM layers in the decoder

        self.dropout_keep_prob=model_config.dropout_keep_prob

        self.embedding_table = embedding_table

        self.embedding_dim_word = model_config.embedding_dim_word
        self.embedding_dim_label= model_config.embedding_dim_label

        # token ids of the fact text, [batch_size, doc_len]
        self.input_fact = tf.placeholder(tf.int32, [self.batch_size, self.doc_len], name='input_fact')

        self.txt_len_att = tf.placeholder(shape=[self.batch_size], dtype=tf.int32)#fed to the attention cell as memory_sequence_length; actually the number of sentences per sample

        # decoder input label sequence (teacher forcing; starts with SOS) and
        # target label sequence (shifted; ends with EOS)
        self.batch_label_src = tf.placeholder(shape=[self.batch_size, None],dtype=tf.int32, name='src_label')
        self.batch_label_tgt = tf.placeholder(shape=[self.batch_size, None], dtype=tf.int32, name='tgt_label')
        self.label_len = tf.placeholder(shape=[self.batch_size], dtype=tf.int32)  # true (unpadded) label-sequence length per sample

        self.sos_label_id = model_config.sos_label_id
        self.eos_label_id = model_config.eos_label_id
        self.max_label_num = model_config.max_label_num  # hard cap on decoding steps at prediction time
        if Train_or_predict=='Train':
            self.train_model()
        else:
            self.predict_model()

    def rnn_cell(self):
        """Return a fresh encoder RNN cell of the configured type (RNN, LSTM or GRU).

        :raises Exception: if self.rnn_type is not one of the supported names.
        """
        if self.rnn_type == "vanilla":
            return tf.nn.rnn_cell.BasicRNNCell(self.hidden_size)
        elif self.rnn_type == "lstm":
            return tf.nn.rnn_cell.BasicLSTMCell(self.hidden_size)
        elif self.rnn_type == "gru":
            return tf.nn.rnn_cell.GRUCell(self.hidden_size)
        else:
            raise Exception("rnn_type must be vanilla、lstm or gru!")
    def bidirectional_rnn(self, inputs, name):
        """Run a dropout-wrapped bidirectional dynamic RNN over ``inputs``.

        :param inputs: [batch', time, depth] float tensor.
        :param name: variable-scope name (word/sentence encoders use distinct scopes).
        :return: (output_fw, output_bw), each [batch', time, hidden_size].
        """
        with tf.variable_scope(name):
            fw_cell = self.rnn_cell()
            fw_cell = tf.nn.rnn_cell.DropoutWrapper(fw_cell, output_keep_prob=self.dropout_keep_prob)
            bw_cell = self.rnn_cell()
            bw_cell = tf.nn.rnn_cell.DropoutWrapper(bw_cell, output_keep_prob=self.dropout_keep_prob)
            (output_fw, output_bw), states = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_cell,cell_bw=bw_cell,
                                                                             inputs=inputs,dtype=tf.float32)
        return output_fw, output_bw
    def attention(self, inputs, name):
        """Soft attention pooling over the time axis of ``inputs``.

        :param inputs: [batch', time, hidden_size * 2] hidden states.
        :param name: variable-scope name.
        :return: [batch', hidden_size * 2] attention-weighted sum over time.
        """
        with tf.variable_scope(name):
            # scores computed in the "general" form: dense tanh projection,
            # then dot product with a learned context vector
            hidden_vec = tf.layers.dense(inputs, self.hidden_size * 2, activation=tf.nn.tanh, name='w_hidden')

            u_context = tf.Variable(tf.truncated_normal([self.hidden_size * 2]), name='u_context')

            alpha = tf.nn.softmax(tf.reduce_sum(tf.multiply(hidden_vec, u_context),
                                                axis=2,
                                                keep_dims=True),
                                  dim=1)
            # hidden_vec = [batch', time, 2*hidden_dim]
            # u_context  = [2*hidden_dim], a learned guide vector
            # hidden_vec * u_context keeps shape [batch', time, 2*hidden_dim];
            # reduce_sum over axis 2 (keep_dims) gives [batch', time, 1];
            # softmax over dim=1 (the time axis) yields the weights alpha.
            # Then weight the original hidden states and sum over time:

            attention_output = tf.reduce_sum(tf.multiply(inputs, alpha), axis=1)

        return attention_output
    def get_embedding_table(self):
        """Wrap the pre-trained embedding matrix in a frozen (non-trainable) Variable."""
        with tf.device("/cpu:0"), tf.name_scope("get_embedding_talbe"):  # load word vectors on CPU
            embedding = tf.Variable(self.embedding_table, trainable=False)#[vocab_size, embed_dim], e.g. [1469766,100]
        return embedding
    def model_encoder_fact(self,embedding):
        """Hierarchical attention encoder over the fact text.

        Words -> sentence vectors (word-level BiRNN + attention), then
        sentences -> document vector (sentence-level BiRNN + attention).

        :param embedding: word-embedding Variable from get_embedding_table().
        :return: (h_drop, sentence_hidden_state) — the document vector
            [batch_size, hidden_size * 2] used to seed the decoder state, and
            the per-sentence states [batch_size, num_sentences, hidden_size * 2]
            used as the decoder's attention memory.
        """
        #encoder:
        with tf.device("/cpu:0"), tf.name_scope("embedding_scope"):  # embedding lookup on CPU
            # cut the flat document into num_sentence_fact equal-length sentences
            input_x = tf.split(self.input_fact, self.num_sentence_fact, axis=1)
            input_x = tf.stack(input_x, axis=1)
            embedding_inputs = tf.nn.embedding_lookup(embedding, input_x)
            sentence_len = int(self.doc_len / self.num_sentence_fact)
            embedding_inputs_reshaped = tf.reshape(embedding_inputs,
                                                   shape=[-1, sentence_len, self.embedding_dim_word])
            # [batch_size*num_sentence,sentence_len,embed_size]
        with tf.name_scope("word_encoder"):
            (output_fw, output_bw) = self.bidirectional_rnn(embedding_inputs_reshaped, "word_encoder")
            word_hidden_state = tf.concat((output_fw, output_bw), 2)
            # [batch_size*num_sentences,sentence_len,hidden_size * 2]
        # word level
        with tf.name_scope("word_attention"):
            sentence_vec = self.attention(word_hidden_state, "word_attention")
            # [batch_size*num_sentences, hidden_size * 2]
        # sentence level
        with tf.name_scope("sentence_encoder"):
            sentence_vec = tf.reshape(sentence_vec, shape=[-1, self.num_sentence_fact, self.hidden_size * 2])
            # [batch_size,num_sentences,hidden_size*2]
            output_fw, output_bw = self.bidirectional_rnn(sentence_vec, "sentence_encoder")
            sentence_hidden_state = tf.concat((output_fw, output_bw), 2)
            # [batch_size, num_sentences, hidden_size * 2]
            # serves as the decoder's attention memory: the decoder attends over sentences.

        with tf.name_scope('sentence_attention'):
            doc_vec = self.attention(sentence_hidden_state, "sentence_attention")
            # [batch_size, hidden_size * 2]
        with tf.name_scope('dropout'):
            h_drop = tf.nn.dropout(doc_vec, self.dropout_keep_prob)
            # used as the decoder's initial state, [batch_size, hidden_size * 2]

        return h_drop, sentence_hidden_state
        # h_drop: encoder output, [batch_size, hidden_size * 2];
        # sentence_hidden_state: time-step text memory the decoder attends over
        # later, [batch_size, num_sentences, hidden_size * 2]

    def model_decoder(self,enc_fact,enc_fact_att):
        """Attentional multi-layer LSTM decoder, unrolled with teacher forcing.

        :param enc_fact: document vector [batch_size, hidden_size * 2]; used as
            both c and h of each decoder LSTM layer's initial state.
            NOTE(review): the decoder cells have embedding_dim_label units, so
            this only lines up when hidden_size * 2 == embedding_dim_label —
            confirm in model_config.
        :param enc_fact_att: attention memory [batch_size, num_sentences, hidden_size * 2].
        :return: (logits, attention_cell) — logits over the full teacher-forced
            sequence, and the AttentionWrapper cell (reused by predict_model).
        """
        with tf.name_scope('dec_init'):
            self.dec_cell = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.LSTMCell(self.embedding_dim_label) for _ in range(self.num_layers)])

            # output projection; num_label-1 because the SOS label is never predicted
            self.w_dec = tf.get_variable('w_dec', [self.embedding_dim_label, self.num_label-1])
            self.b_dec = tf.get_variable('b_dec', [self.num_label-1])

        with tf.name_scope('dec_label_embed'):
            # trainable embedding table for the label vocabulary (incl. SOS/EOS)
            self.embed_table_lb = tf.get_variable(name='embed_table_lb',
                                             shape=[self.num_label, self.embedding_dim_label],
                                             dtype=tf.float32)
            embed_label = tf.nn.embedding_lookup(self.embed_table_lb, self.batch_label_src)
            embed_label = tf.nn.dropout(embed_label, self.dropout_keep_prob)
            # shape= [batch,None,embed_size]
        with tf.name_scope('dec_attention'):

            attention_mechanism = tf.contrib.seq2seq.LuongAttention(self.embedding_dim_label,
                                                                    enc_fact_att,
                                                                    memory_sequence_length=self.txt_len_att)
            attention_cell = tf.contrib.seq2seq.AttentionWrapper(self.dec_cell,
                                                                 attention_mechanism,
                                                                 attention_layer_size=self.embedding_dim_label,
                                                                 alignment_history=False)
            init_enc = enc_fact  # [batch_size, hidden_size * 2]

            c_state = init_enc
            h_state = init_enc

            lstm_tuple = LSTMStateTuple(c_state, h_state)
            init_state = attention_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size)

            # start from the zero AttentionWrapperState but seed every LSTM
            # layer (and the attention vector) with the encoder document vector.
            # NOTE(review): cell_state=(lstm_tuple, lstm_tuple) hard-codes TWO
            # layers — this assumes num_layers == 2; confirm in model_config.
            init_state_ = init_state.clone(cell_state=(lstm_tuple, lstm_tuple),
                                           attention=init_enc,
                                           time=0,
                                           alignments=tf.zeros([self.batch_size, tf.shape(enc_fact_att)[1]]),
                                           alignment_history=(),
                                           attention_state=tf.zeros(
                                               [self.batch_size, tf.shape(enc_fact_att)[1]]))
            # unroll the decoder over the whole teacher-forced label sequence
            dec_att, attention_state = tf.nn.dynamic_rnn(attention_cell, embed_label, self.label_len,
                                                         dtype=tf.float32, initial_state=init_state_)

            output = tf.reshape(dec_att, [-1, self.embedding_dim_label])  # [batch_size*None,embedding_dim_label]

            logits = tf.matmul(output, self.w_dec) + self.b_dec
            # [batch_size*None, num_label-1]
            return logits, attention_cell

    def train_model(self):
        """Build the training graph: masked sequence cross-entropy + Adam."""

        embedding=self.get_embedding_table()
        enc_out_fact, enc_att_out_fact=self.model_encoder_fact(embedding)

        logits,_=self.model_decoder(enc_out_fact, enc_att_out_fact)

        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(self.batch_label_tgt, [-1]),
                                                              logits=logits)
        # [batch_size*max_label_len] — one loss term per (sample, label position),
        # where max_label_len is the longest label sequence in the batch

        # mask out padded label positions beyond each sample's true label count
        label_weights = tf.sequence_mask(self.label_len, maxlen=tf.shape(self.batch_label_src)[1], dtype=tf.float32)
        # [batch_size,max_label_len]
        label_weights = tf.reshape(label_weights, [-1])
        # [batch_size*max_label_len]

        cost = tf.reduce_sum(loss * label_weights)  # sum of all label losses in the batch; grows with batch size

        self.cost_per_label = cost / tf.reduce_sum(label_weights)  # average loss per (unpadded) label position
        self.cost_per_sample = cost / tf.to_float(self.batch_size)  # average loss per sample

        self.cost_summary = tf.summary.scalar('cost', self.cost_per_sample)

        self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost_per_sample)

    def predict_model(self):
        """Build the greedy decoding graph: starting from SOS, feed the argmax
        prediction back in until EOS is emitted or max_label_num steps pass.

        NOTE(review): each loop step feeds a single embedding ([1, embed_size])
        into a cell whose state was built with batch_size — this only lines up
        when batch_size == 1 at prediction time; confirm the predict config.
        """
        embedding = self.get_embedding_table()
        enc_out_fact, enc_att_out_fact = self.model_encoder_fact(embedding)

        # build the decoder once so its variables and wrapped cell exist;
        # only the cell (and w_dec/b_dec) are reused below
        _, attention_cell = self.model_decoder(enc_out_fact, enc_att_out_fact)

        init_enc = enc_out_fact  # [batch_size, hidden_size * 2],[32, 100]

        c_state = init_enc
        h_state = init_enc
        lstm_tuple = LSTMStateTuple(c_state, h_state)
        init_state = attention_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size)

        # same initial-state construction as in model_decoder
        # (two decoder layers assumed — see NOTE there)
        init_state_ = init_state.clone(cell_state=(lstm_tuple, lstm_tuple),
                                       attention=init_enc,
                                       time=0,
                                       alignments=tf.zeros([self.batch_size, tf.shape(enc_att_out_fact)[1]]),
                                       alignment_history=(),
                                       attention_state=tf.zeros(
                                           [self.batch_size, tf.shape(enc_att_out_fact)[1]]))
        # dynamically-sized array of generated label ids, seeded with SOS
        init_array = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True, clear_after_read=False)
        init_array = init_array.write(0, self.sos_label_id)
        init_loop_var = (init_state_, init_array, 0)

        def continue_loop_condition(state, trg_ids, step):
            # keep decoding while the last id is not EOS and the step cap is not reached
            return tf.reduce_all(
                tf.logical_and(tf.not_equal(trg_ids.read(step), self.eos_label_id),
                               tf.less(step, self.max_label_num)))

        def loop_body(state, trg_ids, step):
            trg_input = [trg_ids.read(step)]  # [1]
            trg_emb = tf.nn.embedding_lookup(self.embed_table_lb, trg_input)  # [1,embed_size_lb]
            # core of the loop: one attention-cell step (input in, output + next state out)
            dec_outputs, next_state = attention_cell.call(state=state, inputs=trg_emb)

            logits = tf.matmul(dec_outputs, self.w_dec)+self.b_dec  # same projection as the training decoder
            next_id = tf.argmax(logits, axis=1, output_type=tf.int32)
            trg_ids = trg_ids.write(step + 1, next_id[0])
            return next_state, trg_ids, step + 1

        state, trg_ids, step = tf.while_loop(continue_loop_condition, loop_body, init_loop_var)

        # predicted label-id sequence (includes the leading SOS id)
        self.label_pre = trg_ids.stack()


