# coding:utf-8
'''
Bidirectional RNN regression model (GRU / LSTM / basic RNN) built with the
TensorFlow 1.x graph API: L1 loss, Adam optimizer, scalar target per sequence.

Author: wangyi
'''
import tensorflow as tf



class RNN:
    """Bidirectional RNN regression model (TensorFlow 1.x graph mode).

    Runs a bidirectional GRU/LSTM/basic-RNN over padded input sequences,
    takes the output at the last time step, and regresses a single scalar
    per example through a ReLU-capped linear head. Trained with mean
    absolute error (L1) via Adam.
    """

    def __init__(self, max_seq_length, feature_dim, num_units, learning_rate, mode='gru'):
        """Build the whole graph at construction time.

        Args:
            max_seq_length: padded time dimension of the input tensor.
            feature_dim: size of each time step's feature vector.
            num_units: hidden units per RNN direction.
            learning_rate: Adam learning rate.
            mode: cell type — 'gru', 'lstm'; any other value falls back to
                a basic RNN cell.
        """
        self.mode = mode
        self.max_seq_length = max_seq_length
        self.feature_dim = feature_dim
        self.num_units = num_units
        self.learning_rate = learning_rate

        self.input_x = tf.placeholder(dtype=tf.float32, shape=[None, self.max_seq_length, self.feature_dim], name='input_x')
        self.input_y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='input_y')
        # Named (unlike the original) so they can be fetched by name at
        # inference/serving time, consistent with input_x / input_y.
        self.batch_size = tf.placeholder(shape=[], dtype=tf.int32, name='batch_size')
        self.sequence_length = tf.placeholder(shape=[None, ], dtype=tf.int32, name='sequence_length')
        # NOTE(review): [:, -1, :] reads the output at the padded final step.
        # dynamic_rnn zeroes outputs past sequence_length, so for sequences
        # shorter than max_seq_length the forward half of this slice is all
        # zeros — consider gathering at sequence_length - 1 instead. Left
        # unchanged to preserve behavior.
        self.outputs = self.rnn(self.mode)[:, -1, :]
        self.y_preds = self.regress(self.outputs)
        # loss() defines self.loss_v and the train op self.op. (The original
        # also computed loss_v here and immediately overwrote it in loss();
        # that duplicate graph node was removed.)
        self.loss()

    def rnn(self, mode):
        """Build the bidirectional recurrent layer.

        Args:
            mode: 'gru', 'lstm', or anything else for a basic RNN cell.

        Returns:
            Tensor of shape [batch, max_seq_length, 2 * num_units] — the
            forward and backward outputs concatenated on the feature axis.
        """
        with tf.name_scope('rnn'):
            if mode == 'gru':
                cell = tf.nn.rnn_cell.GRUCell(num_units=self.num_units)
                b_cell = tf.nn.rnn_cell.GRUCell(num_units=self.num_units)
            elif mode == 'lstm':
                cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.num_units)
                b_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.num_units)
            else:
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=self.num_units)
                b_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=self.num_units)
            cell_zero = cell.zero_state(self.batch_size, dtype=tf.float32)
            b_cell_zero = b_cell.zero_state(self.batch_size, dtype=tf.float32)
            outputs, _states = tf.nn.bidirectional_dynamic_rnn(
                cell, b_cell, self.input_x, self.sequence_length,
                cell_zero, b_cell_zero)
            # (fw, bw) output pair -> [batch, time, 2 * num_units].
            return tf.concat(outputs, 2)

    def regress(self, h):
        """Linear regression head: relu(h @ w + b).

        Args:
            h: [batch, 2 * num_units] features (last-step RNN output).

        Returns:
            [batch, 1] predictions, clamped to >= 0 by the ReLU.
        """
        with tf.variable_scope('logits'):
            self.w_r = tf.get_variable(
                'w', shape=[2 * self.num_units, 1], dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer())
            self.b_r = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[1]))
            # NOTE(review): the ReLU forces non-negative predictions, which
            # assumes the regression targets are >= 0 — confirm against the
            # training data.
            return tf.nn.relu(tf.matmul(h, self.w_r) + self.b_r)

    def loss(self):
        """Define the mean-absolute-error loss and the Adam train op."""
        with tf.name_scope('loss'):
            self.loss_v = tf.reduce_mean(tf.abs(self.y_preds - self.input_y))
            self.op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_v)




