# coding=utf-8
import numpy as np
import tensorflow as tf
# import readembed
import readembed


class RNN_Model(object):
    """Bi-LSTM + CNN relation-classification model (TensorFlow 1.x static graph).

    The whole graph — placeholders, embedding lookup, position features,
    entity-difference "relation" vector, bidirectional LSTM, per-filter-size
    1-D convolutions with max-over-time pooling, linear classifier, loss,
    accuracy, summaries, and (when training) clipped-SGD train op — is built
    in ``__init__``.

    NOTE(review): several intermediate tensors are built but not consumed on
    the active path (``input_new``, ``self.h_pool_flat``, the ``mean_pooling``
    / ``get_entity`` / ``get_relation_word`` helpers); they look like remnants
    of experiments kept via the commented-out blocks below.
    """

    def __init__(self, config, is_training):
        """Build the graph.

        Args:
            config: configuration object; the attributes read below
                (batch_size, class_num, num_step, hidden_neural_size,
                embedFile, PF_size, filter_sizes, num_filters, ...) define
                the expected interface — no validation is performed.
            is_training: when True, dropout is applied to the CNN features
                and the backprop/optimizer ops are created; when False the
                constructor returns right after the summary ops.
        """

        self.keep_prob = config.keep_prob
        # self.batch_size=tf.Variable(0,dtype=tf.int32,trainable=False)
        batch_size = config.batch_size
        class_num = config.class_num
        class_num_2 = config.class_num_2
        embedFile = config.embedFile

        hidden_neural_size = config.hidden_neural_size
        # hidden_neural_size2=config.hidden_neural_size2
        num_step = config.num_step
        wordembedding_only = config.wordembedding_only
        get_entity_only = config.get_entity_only
        lstm_all = config.lstm_all
        # self.input_data = tf.placeholder( tf.int32, [batch_size, num_step],name = "input_data")

        # Word-id input: [batch, num_step].
        self.input_data = tf.placeholder(tf.int32, [None, num_step], name = "input_data")
        self.input_data_label = tf.placeholder(tf.float32, [batch_size, num_step, hidden_neural_size * 2],
                                               name = "input_data_label")
        # Raw per-word label; does not match the batched data format.
        self.input_label = tf.placeholder(tf.int32, [None, num_step], name = "input_label")

        # print self.input_data
        # One-hot class target: [batch, class_num].
        self.target = tf.placeholder(tf.float32, [None, class_num ], name = "target")
        # Per-sample positions of the two relation entities: [batch, 2].
        self.entity_posi=tf.placeholder(tf.int32,[None,2],name='entity_position')

        # self.target=tf.placeholder(tf.int64,None)
        #self.mask_x = tf.placeholder(tf.float32, [num_step, None], name = "mask_x")

        # Position feature vectors, to be concatenated with the word embeddings.
        self.position = tf.placeholder(tf.int32, [None, num_step * 2], name = "position")
        pos_shape = tf.reshape(self.position, [batch_size, num_step, 2])  # position vectors
        # print pos_shape
        PF_size = config.PF_size

        # self.keep_prob=tf.placeholder(tf.float32,name="keep_prob")
        vocabulary_size = config.vocabulary_size  # vocabulary size
        embed_dim = config.embed_dim  # word-embedding dimension
        hidden_layer_num = config.hidden_layer_num  # number of stacked LSTM layers
        # self.new_batch_size=tf.placeholder(tf.int32,shape=[],name="new_batch_size") # batch size
        # self._batch_size_update=tf.assign(self.batch_size,self.new_batch_size)# op to change the batch size
        # print self._batch_size_update

        filter_sizes = config.filter_sizes  # list of convolution filter sizes
        num_filters = config.num_filters  # number of filters per size
        self.filter_total=len(filter_sizes)*num_filters
        # Projection sized for filter_total features — matches self.out, which
        # below is set to the forward-direction pooled features only.
        self.conv_W_projection = tf.get_variable("conv_W_project", shape=[self.filter_total, class_num])
        #self.conv_W_projection = tf.get_variable("conv_W_project", shape=[hidden_neural_size, class_num])
        self.conv_b_projection = tf.get_variable("conv_b_project", shape=[class_num])

        # Map the 2-D position feature into a higher-dimensional space.
        def positionfeature(position_shape, position_feature_size):
            """Project each word's 2-D position pair through a learned matrix.

            Returns a list of batch_size tensors, each [num_step, position_feature_size],
            i.e. effectively [batch_size, num_step, PF_size] once stacked by tf.concat.
            """

            with tf.name_scope("Position_feature"):
                PF_vec = []
                PF_w = tf.get_variable("PF_w", [2, position_feature_size], dtype = tf.float32)  # position-embedding projection matrix
                # PF_vec = tf.matmul(self.position, PF_w)  # batch_size * PF_size; each row is the projected position vector
                for i in range(batch_size):
                    vec = tf.reshape(tf.slice(tf.cast(position_shape, dtype = tf.float32), [i, 0, 0], [1, num_step, 2]),
                                     [num_step, 2])
                    pf_vec = tf.matmul(vec, PF_w)  # num_step * PF_size; each row is the word's projected position vector
                    PF_vec.append(pf_vec)  # [batch_size,num_step,PF_size]
                return PF_vec

        def entity_match(input_embed):
            """Gather the embeddings of the two relation entities per sample.

            Uses self.entity_posi[:, 0] / [:, 1] as per-row column indices into
            input_embed; returns (e1_embed, e2_embed), each [batch_size, embed_dim]
            (assuming input_embed is [batch, num_step, embed_dim] — TODO confirm).
            """
            with tf.name_scope('entity_match'):
                e1=self.entity_posi[:,0]
                e2=self.entity_posi[:,1]
                indice_e1=[]
                indice_e2=[]
                for i in range(batch_size):

                    indice_e1.append([i,e1[i]])
                    indice_e2.append([i,e2[i]])
                e1_embed=tf.gather_nd(input_embed,indice_e1)
                e2_embed=tf.gather_nd(input_embed,indice_e2)

            return e1_embed,e2_embed



        #

        # embedding layer
        with tf.device("/cpu:0"), tf.name_scope("embedding_layer"):  # pin to CPU (not GPU) and name this layer
            # embedding=tf.get_variable("embedding",[vocabulary_size,embed_dim],dtype=tf.float32)# embedding dimensions / dtype
            # Pre-trained embeddings loaded from file (not a trainable tf.Variable).
            embedId, embeddings, words = readembed.readEmbedFile(embedFile)
            # print embeddings

            inputs = tf.nn.embedding_lookup(embeddings, self.input_data)  # look up each word's embedding vector
            # print inputs
            e1_embed, e2_embed = entity_match(inputs)
            entity_w = tf.get_variable('entity_w', [embed_dim,embed_dim+PF_size], dtype=tf.float32)
            # "Relation" vector: projected difference of the two entity embeddings
            # (TransE-style e2 - e1), widened to embed_dim+PF_size to match inputs.
            relation = tf.matmul(tf.subtract(e2_embed, e1_embed),entity_w)
            #relation=tf.subtract(e2_embed, e1_embed)
            relation=tf.reshape(relation,shape=[batch_size,1,embed_dim+PF_size])

            #batch_relation=tf.tile(relation,multiples=[1,num_step,1])



            # Concatenate each word's embedding with its position-feature vector.
            if not wordembedding_only:
                # join position vectors
                inputs = tf.concat([inputs, positionfeature(pos_shape, PF_size)],
                                   2)  # API note: before TF 1.0 tf.concat took (axis, values); 1.0+ takes (values, axis).
                print(inputs)

        # NOTE(review): input_new (inputs + relation appended as an extra
        # "time step") is computed but never used — the RNN below consumes
        # `inputs` with sequence length num_step. Confirm which was intended.
        input_new=tf.concat([inputs,relation],1)

        lstm_cell_fw = tf.nn.rnn_cell.BasicLSTMCell(hidden_neural_size, forget_bias=1.0,
                                                    state_is_tuple=True)  # basic forward LSTM cell

        lstm_cell_bw = tf.nn.rnn_cell.BasicLSTMCell(hidden_neural_size, forget_bias=1.0,
                                                    state_is_tuple=True)  # basic backward LSTM cell
        if self.keep_prob < 1:
            lstm_cell_fw = tf.nn.rnn_cell.DropoutWrapper(
                lstm_cell_fw, output_keep_prob=self.keep_prob
            )  # apply dropout to cell outputs

            lstm_cell_bw = tf.nn.rnn_cell.DropoutWrapper(
                lstm_cell_bw, output_keep_prob=self.keep_prob
            )  # apply dropout to cell outputs
        # NOTE(review): [cell] * hidden_layer_num reuses the same cell object for
        # every layer; newer TF 1.x versions reject (or weight-share) this —
        # each layer should get its own cell instance. Confirm target TF version.
        cell_fw = tf.nn.rnn_cell.MultiRNNCell([lstm_cell_fw] * hidden_layer_num, state_is_tuple=True)

        cell_bw = tf.nn.rnn_cell.MultiRNNCell([lstm_cell_bw] * hidden_layer_num, state_is_tuple=True)

        # initialize cell states to zeros
        self.initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)
        self.initial_state_bw = cell_bw.zero_state(batch_size, tf.float32)
        if is_training and self.keep_prob < 1:
            #input_new = tf.nn.dropout(input_new, self.keep_prob)
            inputs = tf.nn.dropout(inputs, self.keep_prob)
        with tf.variable_scope("bilstm_layer"):

            # forward and backward
            # All sequences are treated as full length (constant num_step);
            # no per-sample sequence lengths are used.
            self.output, outputstates = tf.nn.bidirectional_dynamic_rnn(
                cell_fw,
                cell_bw,
                inputs,
                # self.inputs_emb,
                tf.constant(num_step, shape = [batch_size]),
                self.initial_state_fw,
                self.initial_state_bw,
                dtype = tf.float32
                )
            # print self.output

        #self.LSTM_out=outputstates[0][0].h
        # Forward-direction outputs only.
        self.LSTM_out=self.output[0]


        def mean_pooling(output, mask_x):  # output=self.output   mask_x=self.output
            """Masked mean-pool over axis 0 (currently unused on the active path)."""
            with tf.name_scope("mean_pooling_layer"):  # mean-pooling layer over the LSTM outputs
                out_put = output * mask_x[:, :, None]
                out_put = tf.reduce_sum(out_put, 0) / (tf.reduce_sum(mask_x, 0)[:, None])  # layer output
                # out_put=tf.reduce_sum(self.out,1)/tf.reduce_sum(self.input_data_label,1)# layer output
                # print out_put
                # print tf.reduce_sum(self.input_data_label,1)

            return out_put

        # self.outputs = tf.reshape(tf.concat(self.output,1 ),
        #                           [batch_size, num_step, hidden_neural_size * 2])
        # self.out_put = tf.reshape(tf.concat(self.output,1 ),
        #                           [batch_size * num_step, hidden_neural_size * 2])

        def get_entity(input_label, out_put, num_step, batch_size, hidden_neural_size):
            """Extract the two entity words' LSTM outputs per sample (unused).

            NOTE(review): this helper is never called on the active path and uses
            pre-TF-1.0 APIs (tf.reverse with a bool list, tf.concat(axis, values))
            that conflict with the 1.0+ call style used elsewhere in this file.
            """
            label_1 = tf.cast(tf.argmax(input_label, 1), dtype = tf.int32)
            ind = tf.range(0, batch_size * num_step, num_step)
            label_1 = tf.add(label_1, ind)
            s = tf.reverse(input_label, [False, True])  # reverse each row's sequence
            s1 = tf.argmax(s, 1)  # per-row argmax after reversal
            mask = tf.constant(num_step - 1, dtype = tf.int64, shape = [batch_size])
            label_2 = tf.cast(tf.subtract(mask, s1), dtype = tf.int32)  # index of the second relation entity in the sentence
            label_2 = tf.add(label_2, ind)

            r_word1 = tf.gather(out_put, label_1, name = 'r_word1')  # 2-D: [batch_size, hidden*2], each sample's first relation word
            r_word2 = tf.gather(out_put, label_2, name = 'r_word2')  # 2-D: [batch_size, hidden*2], each sample's second relation word

            # reshape the 2-D matrices to 3-D

            r_word1_3 = tf.reshape(r_word1, [batch_size, 1, hidden_neural_size * 2])
            r_word2_3 = tf.reshape(r_word2, [batch_size, 1, hidden_neural_size * 2])

            r1_batch_sample = []
            r2_batch_sample = []
            for i in range(batch_size):
                sample1 = tf.gather(r_word1_3, i)
                sample2 = tf.gather(r_word2_3, i)

                r1_batch_sample.append(sample1)  # each list element is one sample's (2-D) first relation word
                r2_batch_sample.append(sample2)  # each list element is one sample's (2-D) second relation word

            out = tf.concat(1, [r1_batch_sample, r2_batch_sample])
            return out

        def get_relation_word(input_label, lstm_outputs, num_step, batch_size, hidden_neural_size):
            """Extract the LSTM outputs of the words between the two entities (unused).

            NOTE(review): never called on the active path; tf.split(0, n, x) is
            the pre-TF-1.0 argument order. Padded/truncated to a fixed span of 15.
            """
            label_1 = tf.cast(tf.argmax(input_label, 1), dtype = tf.int32)
            s = tf.reverse(input_label, [False, True])  # reverse each row's sequence
            s1 = tf.argmax(s, 1)  # per-row argmax after reversal
            mask = tf.constant(num_step - 1, dtype = tf.int64, shape = [batch_size])
            label_2 = tf.cast(tf.subtract(mask, s1), dtype = tf.int32)  # index of the second relation entity in the sentence

            nrword = []
            self.dis = tf.cast(tf.argmax(tf.subtract(label_2, label_1) + 1, 1), dtype = tf.int32)  # presumably used for max-pooling (original comment garbled) — verify
            temp = tf.split(0, batch_size, lstm_outputs)

            for i in range(batch_size):
                nr_ind = tf.range(tf.gather(label_1, i), tf.gather(label_2, i) + 1, 1)  # indices of the words between the two relation words, per sample

                nr_word = tf.gather(tf.reshape(temp[i], [num_step, hidden_neural_size * 2]), nr_ind)

                # print nr_word
                con = 15 - (tf.subtract(tf.gather(label_2, i), tf.gather(label_1, i)) + 1)
                nr_word_p = tf.cond(con > 0, lambda: tf.pad(nr_word, [[0, con], [0, 0]]),
                                    lambda: tf.slice(nr_word, [0, 0], [15, hidden_neural_size * 2]))
                # nr_word_p=tf.pad(nr_word,[[0,tf.max(0,15-(tf.sub(tf.gather(label_2,i),tf.gather(label_1,i))+1))],[0,0]])  # cap the span between the relation words (zero-padded); comment says 20 but the code uses 15
                nrword.append(nr_word_p)
            out = nrword
            return out

        # if lstm_all:
        #     self.out = self.outputs
        #
        # if get_entity_only:
        #     print ("只输入实体到CNN中")
        #     self.out = get_entity(self.input_label, self.out_put, num_step, batch_size, hidden_neural_size)
        #     print self.out
        # if not lstm_all and not get_entity_only:
        #     self.out = get_relation_word(self.input_label, self.outputs, num_step, batch_size, hidden_neural_size)


        # 1.sentence embedding 2.convocation layer 3.max-pooling 4.liner-classification

        self.sentence_embedding_fw = self.output[0]  # [batch_size,sentence_length,hidden_size]
        self.sentence_embedding_bw = self.output[1]

        # 2.=====>loop each filter size. for each filter, do:convolution-pooling layer(a.create filters,b.conv,c.apply nolinearity,d.max-pooling)--->
        # you can use:tf.nn.conv2d;tf.nn.relu;tf.nn.max_pool; feature shape is 4-d. feature is a new variable
        pooled_outputs = []
        pooled_fw_outputs=[]
        pooled_bw_outputs=[]
        for i, filter_size in enumerate(filter_sizes):
            with tf.variable_scope("convolution-pooling-%s" % filter_size):
                # ====>a.create filter
                print(i)
                print(filter_size)

                filter = tf.get_variable("filter1-%s"%filter_size,
                                         [int(filter_size), hidden_neural_size, num_filters])
                filter2 = tf.get_variable("filter2-%s"%filter_size,
                                         [int(filter_size), hidden_neural_size, num_filters])

                # ====>b.conv operation: conv2d===>computes a 2-D convolution given 4-D `input` and `filter` tensors.
                # Conv.Input: given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`
                # Conv.Returns: A `Tensor`. Has the same type as `input`.
                #         A 4-D tensor. The dimension order is determined by the value of `data_format`, see below for details.
                # 1)each filter with conv2d's output a shape:[1,sequence_length-filter_size+1,1,1];2)*num_filters--->[1,sequence_length-filter_size+1,1,num_filters];3)*batch_size--->[batch_size,sequence_length-filter_size+1,1,num_filters]
                # input data format:NHWC:[batch, height, width, channels];output:4-D
                conv_fw = tf.nn.conv1d(self.sentence_embedding_fw, filter, 1, padding = 'SAME',
                                       name = 'conv_fw')  # shape:[batch_size,sequence_length,num_filter]
                conv_bw = tf.nn.conv1d(self.sentence_embedding_bw, filter2, 1, padding = 'SAME', name = 'conv_bw')

                # ====>c. apply nolinearity
                b_fw = tf.get_variable("b-fw-%s" % filter_size, [num_filters])  # ADD 2017-06-09
                h_fw = tf.expand_dims(tf.nn.relu(tf.nn.bias_add(conv_fw, b_fw), "relu"),
                                      2)  # shape:[batch_size,sequence_length ,num_filters]. tf.nn.bias_add:adds `bias` to `value`

                b_bw = tf.get_variable("b-bw-%s" % filter_size, [num_filters])
                h_bw = tf.expand_dims(tf.nn.relu(tf.nn.bias_add(conv_bw, b_bw)),
                                      2)  # expand the 3-D output to 4-D, as max_pool expects # shape: [batch_size,sequence_length ,1,num_filter]

                # ====>. max-pooling.  value: A 4-D `Tensor` with shape `[batch, height, width, channels]
                #                  ksize: A list of ints that has length >= 4.  The size of the window for each dimension of the input tensor.
                #                  strides: A list of ints that has length >= 4.  The stride of the sliding window for each dimension of the input tensor.
                pooled_fw = tf.nn.max_pool(h_fw, ksize = [1, num_step, 1, 1],
                                           # self.sequence_length - filter_size + 1
                                           strides = [1, 1, 1, 1], padding = 'VALID',
                                           name = "pool-fw")  # shape:[batch_size, 1, 1, num_filters].max_pool:performs the max pooling on the input.
                pooled_bw = tf.nn.max_pool(h_bw, ksize = [1, num_step, 1, 1],
                                           strides = [1, 1, 1, 1], padding = 'VALID',
                                           name = 'pool-bw')
                pooled = tf.concat([pooled_fw, pooled_bw], 3)  # shape:[batch_size,1,1,2*num_filters]
                pooled_fw_outputs.append(pooled_fw)
                pooled_bw_outputs.append(pooled_bw)
                pooled_outputs.append(pooled)


        # 3.=====>combine all pooled features, and flatten the feature.output' shape is a [1,None]
        # e.g. >>> x1=tf.ones([3,3]);x2=tf.ones([3,3]);x=[x1,x2]
        #         x12_0=tf.concat(x,0)---->x12_0' shape:[6,3]
        #         x12_1=tf.concat(x,1)---->x12_1' shape;[3,6]
        self.h_pool_fw=tf.concat(pooled_fw_outputs,3)
        self.h_pool_bw=tf.concat(pooled_bw_outputs,3)
        self.h_pool = tf.concat(pooled_outputs,
                                3)  # shape:[batch_size, 1, 1, num_filters_total]. tf.concat=>concatenates tensors along one dimension.where num_filters_total=num_filters_1+num_filters_2+num_filters_3
        print(self.h_pool)
        self.h_pool_flat_fw=tf.reshape(self.h_pool_fw,[-1,self.filter_total])
        self.h_pool_flat_bw=tf.reshape(self.h_pool_bw,[-1,self.filter_total])
        self.h_pool_flat = tf.reshape(self.h_pool, [-1,
                                                    2*self.filter_total])  # bidirectional: 2*self.filter_total # shape should be:[None,num_filters_total]. here this operation has some result as tf.sequeeze().e.g. x's shape:[3,3];tf.reshape(-1,x) & (3, 3)---->(1,9)
        print(self.h_pool_flat)
        # NOTE(review): only the FORWARD pooled features feed the classifier;
        # h_pool_flat (fw+bw, 2*filter_total) is computed but unused for logits.
        # This matches conv_W_projection's [filter_total, class_num] shape.
        self.out=self.h_pool_flat_fw
        # 4.=====>add dropout: use tf.nn.dropout
        if is_training:
            with tf.name_scope("dropout"):
                self.out = tf.nn.dropout(self.out,
                                            keep_prob = self.keep_prob)  # [None,num_filters_total]

        # self.saver = tf.train.Saver()

        # 5. logits(use linear layer)and predictions(argmax)


        # with tf.variable_scope("attention_layer"):
        #
        #     # filter_shape=[1,1,num_step,hidden_neural_size*2]
        #     # att_w = tf.get_variable("att_w", shape = filter_shape, dtype = tf.float32)
        #
        #     # outputs=tf.expand_dims(self.outputs,-1)
        #     # att_m=tf.nn.conv2d(outputs,att_w,strides = [1,1,1,1],padding ='SAME' )  #wH
        #     # print att_m
        #     # att_m=tf.reshape(att_m,[hidden_neural_size*2])
        #     # print att_m
        #     att_w=tf.get_variable("att_w",shape = [self.filter_total,1],dtype = tf.float32)
        #     lstm_out = tf.split(self.outputs, batch_size, 0)
        #
        #     cnn_out_fw=tf.split(self.h_pool_flat_fw,batch_size,0)
        #     cnn_out_bw=tf.split(self.h_pool_flat_bw,batch_size,0)
        #
        #
        #     #att_a=[]
        #
        #     att_out=[]
        #     for i in range(batch_size):
        #
        #         att_a=tf.nn.softmax(tf.matmul(tf.nn.tanh(tf.reshape([cnn_out_fw[i],cnn_out_bw[i]],[2,self.filter_total])),att_w))
        #
        #         att_a_step=tf.split(att_a,2,0)
        #
        #         #lstm_out_step=tf.split(lstm_out[i],num_step,1)
        #         cnn_out_step=[cnn_out_fw[i],cnn_out_bw[i]]
        #         #print lstm_out_step
        #         lstm_att = []
        #         cnn_att=[]
        #         # for j in range(num_step):
        #         #     lstm_att_temp=tf.reshape(att_a_step[j],shape = [1])*tf.reshape(lstm_out_step[j],shape = [hidden_neural_size*2])
        #         #     #print lstm_att_temp
        #         #     lstm_att.append(lstm_att_temp)
        #         # lstm_att_Ten=tf.convert_to_tensor(lstm_att)
        #         #print lstm_att_Ten
        #         for j in range(2):
        #             cnn_att_temp=tf.reshape(att_a_step[j],shape = [1])*tf.reshape(cnn_out_step[j],shape = [self.filter_total])
        #             cnn_att.append(cnn_att_temp)
        #
        #         cnn_att_Ten=tf.convert_to_tensor(cnn_att)
        #         att_out.append(cnn_att_Ten)
        #         #att_out.append(lstm_att_Ten)
        #     att_out=tf.convert_to_tensor(att_out)
        #
        #     self.out=tf.reshape(att_out,[batch_size,2*self.filter_total])

        # NOTE(review): marks the current variable scope for reuse; presumably
        # so a second model instance shares weights — confirm against callers.
        tf.get_variable_scope().reuse_variables()



        with tf.name_scope("output"):
            # self.logits = tf.matmul(self.h_drop,
            #                        self.conv_W_projection) + self.conv_b_projection  # shape:[None, self.num_classes]==tf.matmul([None,self.embed_size],[self.embed_size,self.num_classes])

            self.logits=tf.matmul(self.out,self.conv_W_projection)+self.conv_b_projection
        with tf.name_scope("loss"):
            l2_lambda = 0.001
            # input: `logits` and `labels` must have the same shape `[batch_size, num_classes]`
            # output: A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the softmax cross entropy loss.
            # One-hot targets are converted to class indices via argmax for the
            # sparse cross-entropy op.
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = tf.argmax(self.target, 1),
                                                                    logits = self.logits)  # sigmoid_cross_entropy_with_logits.#losses=tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y,logits=self.logits)
            # print("1.sparse_softmax_cross_entropy_with_logits.losses:",losses) # shape=(?,)
            loss = tf.reduce_mean(losses)  # print("2.loss.loss:", loss) #shape=()
            # L2-regularize all trainable non-bias variables.
            l2_losses = tf.add_n(
                [tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
            self.loss = loss + l2_losses
            # print self.cost
        with tf.name_scope("accuracy"):  # model accuracy
            self.prediction = tf.argmax(self.logits, 1, name = "prediction_cnn")
            correct_prediction = tf.equal(self.prediction, tf.argmax(self.target, 1))  # a prediction is correct when it equals the label

            self.correct_num = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))  # number of correct CNN predictions

            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                                           name = "accuracy")  # mean accuracy is the network's final accuracy
            # self.accuracy = tf.divide(self.correct_num,batch_size)

            # # binary-classification parts
            # self.prediction_other = tf.argmax(self.logits_other, 1, name = "prediction_other")
            # correct_prediction_other = tf.equal(tf.cast(self.prediction_other, tf.int32), self.target_other)
            # self.correct_num_other = tf.reduce_sum(tf.cast(correct_prediction_other, tf.float32))
            # self.accuracy_other = tf.divide(self.correct_num_other, batch_size)

        # add summary
        loss_summary = tf.summary.scalar("loss", self.loss)  # scalar summary of the mean cross-entropy loss per batch
        # add summary
        accuracy_summary = tf.summary.scalar("accuracy_summary", self.accuracy)
        # Define backprop ops only when training.
        if not is_training:
            return
        self.globe_step = tf.Variable(0, name = "globe_step", trainable = False)
        self.lr = tf.Variable(0.0, trainable = False)

        tvars = tf.trainable_variables()  # variables updated by backprop

        # clip_by_global_norm bounds the gradient norm to avoid exploding gradients
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), config.max_grad_norm)

        # keep track of gradient values and sparsity(optional)
        grad_summaries = []
        for g, v in zip(grads, tvars):
            if g is not None:
                grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
                sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                grad_summaries.append(grad_hist_summary)
                grad_summaries.append(sparsity_summary)
        self.grad_summaries_merged = tf.summary.merge(grad_summaries)
        self.summary = tf.summary.merge([loss_summary, accuracy_summary, self.grad_summaries_merged])

        # optimizer
        optimizer = tf.train.GradientDescentOptimizer(self.lr)
        #optimizer=tf.train.AdamOptimizer(self.lr)
        # NOTE(review): this apply_gradients result is discarded — it creates a
        # duplicate, never-run op; only self.train_op below is used.
        optimizer.apply_gradients(zip(grads, tvars))

        self.train_op = optimizer.apply_gradients(zip(grads, tvars), self.globe_step)
        # training step
        #self.train_op=optimizer.apply_gradients(zip(grads,tvars))



        self.new_lr = tf.placeholder(tf.float32, shape = [], name = "new_learning_rate")
        self._lr_update = tf.assign(self.lr, self.new_lr)  # reassign the learning rate, e.g. after a set number of epochs

    def assign_new_lr(self, session, lr_value):
        """Run the lr-update op, setting the learning rate to lr_value."""
        session.run(self._lr_update, feed_dict = {self.new_lr: lr_value})  # feed the new learning-rate value into the graph
        # def assign_new_batch_size(self,session,batch_size_value):
        # session.run(self._batch_size_update,feed_dict={self.new_batch_size:batch_size_value})
