import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell,LSTMStateTuple,MultiRNNCell,stack_bidirectional_dynamic_rnn


class Model():
    """TF1 graph-mode sequence classifier.

    Builds the whole computation graph at construction time:
    placeholders -> (optionally bidirectional) stacked-LSTM encoder ->
    dropout -> dense output layer -> softmax cross-entropy loss ->
    Adam training op (with optional gradient clipping).
    """

    def __init__(self,num_layers,hidden_size,max_seq_len,
                 outputs_size,input_vec_len,bidirectional,
                 learning_rate,clip_grad):
        self._num_layers=num_layers      # number of stacked LSTM layers
        self._hidden_size=hidden_size    # units per LSTM layer
        self._max_seq_len=max_seq_len    # max input sequence length (stored; not used in graph construction below)
        self._outputs_size=outputs_size  # output dimension (number of classes)
        self._input_vec_len=input_vec_len    # dimensionality of each input vector
        self._bidirectional=bidirectional    # network structure:
                                             # True:  bidirectional LSTM
                                             # False: plain (unidirectional) LSTM
        self._learning_rate=learning_rate    # learning rate for Adam
        self._clip_grad=clip_grad       # gradient clipping bound (None disables clipping)

        # Build the computation graph.
        self._makeGraph()

    def _initPlaceholder(self):
        # Create the placeholders fed at session run time.
        # labels: float targets; presumably [batch, num_classes] to match
        # the logits in lossOp — confirm against the feeding code.
        self.labels=tf.placeholder(dtype=tf.float32,shape=[None,None],
                                   name="labels")
        # inputs_data: [batch, time, input_vec_len]
        self.inputs_data=tf.placeholder(dtype=tf.float32,shape=[None,None,self._input_vec_len],
                                        name="inputs_data")
        # dropout: scalar KEEP probability (passed as keep_prob below).
        self.dropout=tf.placeholder(dtype=tf.float32,
                                    shape=[],
                                    name="dropout")

    def _makeGraph(self):
        # Assemble the graph: encoder -> dropout -> dense -> loss -> train op.
        self._initPlaceholder()     # create placeholders
        outputs, state=LSTMlayer(inputs_data=self.inputs_data,
                                 num_layers=self._num_layers,
                                 hidden_size=self._hidden_size,
                                 bidirectional=self._bidirectional)
        # Classify from the encoder's final hidden state (state.h),
        # regularized with dropout.
        lstm_output=tf.nn.dropout(state.h,keep_prob=self.dropout)
        self.logits=outputLayer(input_data=lstm_output,
                                outputs_size=self._outputs_size,
                                activation=None,
                                name="prediction")

        self.loss=lossOp(logits=self.logits,labels=self.labels)  # softmax cross-entropy loss
        self.train_op=optimizerOp(loss=self.loss,
                                  learning_rate=self._learning_rate,
                                  clip_grad=self._clip_grad)


def getLSTMCell(hidden_size):
    """Build a single LSTM cell.

    :param hidden_size: number of units in the cell, int
    :return: an ``LSTMCell`` whose weights are initialized uniformly
        in [-0.1, 0.1]
    """
    weight_init = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
    return LSTMCell(num_units=hidden_size, initializer=weight_init)


def LSTMlayer(inputs_data,num_layers,hidden_size,bidirectional):
    """Encode the input sequence with a stacked (bi)LSTM.

    :param inputs_data: float tensor [batch, time, input_dim]
    :param num_layers: number of stacked layers, int
    :param hidden_size: units per layer, int
    :param bidirectional: True -> stacked bidirectional LSTM,
        False -> stacked unidirectional LSTM
    :return: (outputs, state) — per-step outputs of the top layer and an
        LSTMStateTuple with the top layer's final (c, h). In the
        bidirectional case the forward and backward final states are
        summed element-wise so the state size stays `hidden_size`
        (concatenating would double it).
    """
    if bidirectional:
        cells_fw=[getLSTMCell(hidden_size) for _ in range(num_layers)]
        cells_bw=[getLSTMCell(hidden_size) for _ in range(num_layers)]
        outputs, states_fw, states_bw = stack_bidirectional_dynamic_rnn(
            cells_fw=cells_fw, cells_bw=cells_bw, inputs=inputs_data,
            dtype=tf.float32)

        # BUG FIX: states_fw/states_bw hold one LSTMStateTuple per layer,
        # ordered bottom-to-top. The original indexed [0] (bottom layer),
        # which discards the deeper layers' representation whenever
        # num_layers > 1; use the top layer [-1] instead.
        state_c = tf.add(states_fw[-1].c, states_bw[-1].c)
        state_h = tf.add(states_fw[-1].h, states_bw[-1].h)
        state = LSTMStateTuple(c=state_c, h=state_h)

    else:
        cells = MultiRNNCell(cells=[getLSTMCell(hidden_size)
                                            for _ in range(num_layers)])
        outputs, state = tf.nn.dynamic_rnn(cell=cells,
                                           inputs=inputs_data,
                                           dtype=tf.float32)
        # BUG FIX: dynamic_rnn with MultiRNNCell returns a tuple of
        # per-layer states; take the top layer's, not the bottom's.
        state=state[-1]
    return outputs, state


def outputLayer(input_data,outputs_size,activation,name):
    """Fully-connected projection (output) layer.

    :param input_data: input tensor
    :param outputs_size: output dimensionality, int
    :param activation: activation function; None means linear activation
    :param name: layer / variable-scope name
    :return: the dense layer's output tensor
    """
    kernel_init = tf.truncated_normal_initializer()  # weight-matrix initializer
    return tf.layers.dense(inputs=input_data,
                           units=outputs_size,
                           activation=activation,
                           kernel_initializer=kernel_init,
                           name=name)


def lossOp(logits,labels):
    """Mean softmax cross-entropy loss over the batch.

    :param logits: predictions, shape [batch_size, num_classes]
    :param labels: targets, same shape as logits
    :return: scalar loss tensor
    """
    per_example_loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                               labels=labels)
    return tf.reduce_mean(per_example_loss)


def optimizerOp(loss,learning_rate,clip_grad=None):
    """Create the Adam training op, with optional gradient clipping.

    :param loss: scalar loss tensor
    :param learning_rate: learning rate for Adam
    :param clip_grad: clipping bound; each gradient is clipped
        element-wise to [-clip_grad, clip_grad]. None disables clipping.
    :return: the training op
    """
    optimizer = tf.train.AdamOptimizer(learning_rate)
    if clip_grad is None:
        train_op = optimizer.minimize(loss)
    else:
        grads_and_vars = optimizer.compute_gradients(loss)
        # BUG FIX: compute_gradients returns (None, var) for variables the
        # loss does not depend on (e.g. the cells of the unused LSTM
        # branch); tf.clip_by_value(None, ...) would raise, so skip them.
        grads_and_vars_clip = [
            (tf.clip_by_value(g, -clip_grad, clip_grad), v)
            for g, v in grads_and_vars if g is not None
        ]
        train_op = optimizer.apply_gradients(grads_and_vars_clip)
    return train_op
