# -*- coding: utf-8 -*-
"""
Created on Tue Feb  7 13:28:08 2017

@author: chuito
"""

import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.rnn_cell import RNNCell
from tensorflow.contrib.rnn.python.ops.rnn_cell import _linear
from modules import positional_encoding, multihead_attention,ff
from modules import dense
import pdb


class GRNNSRCell(RNNCell):
    """GRU-like recurrent cell with a self-regulated secondary output track.

    The state is an ``LSTMStateTuple (c, h)``: ``c`` is a memory updated with
    GRU-style reset/update gating, while ``h`` is a second track whose
    candidate is scaled by a learned vector ``w`` (range (-2, 2)) computed
    from the gated memory.
    """

    def __init__(self, num_units, activation=tf.tanh, state_is_tuple=True):
        # num_units: dimensionality of each state component.
        self._num_units = num_units
        self._activation = activation
        self._state_is_tuple = state_is_tuple

    @property
    def output_size(self):
        return self._num_units

    @property
    def state_size(self):
        return tf.nn.rnn_cell.LSTMStateTuple(self._num_units, self._num_units)

    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or "grnnsr_cell"):
            memory, hidden = state
            with tf.variable_scope("gates"):
                # A single affine map yields all four gates at once;
                # squash with sigmoid, then split along the feature axis.
                gates = tf.sigmoid(
                    _linear([inputs, memory], 4 * self._num_units, True, 1.0))
                u_c, u_h, r_c, r_w = array_ops.split(
                    split_dim=1, num_split=4, value=gates)
            with tf.variable_scope("inputs"):
                # GRU-style candidate for the memory track.
                cand_c = tf.tanh(_linear(
                    [inputs, r_c * memory], self._num_units, True,
                    scope="input_c"))
                # Scaling vector derived from the gated memory, in (-2, 2).
                w = 2 * tf.tanh(_linear(
                    memory * r_w, self._num_units, True, scope="weights"))
                # Candidate for the output track, modulated by w.
                cand_h = w * tf.tanh(_linear(
                    inputs, self._num_units, True, scope="input_h"))
            # Convex blend of previous state and candidates via update gates.
            next_c = u_c * memory + (1 - u_c) * cand_c
            next_h = u_h * hidden + (1 - u_h) * cand_h
            return next_h, tf.nn.rnn_cell.LSTMStateTuple(next_c, next_h)

class GRNNSPCell(RNNCell):
    """GRU-like recurrent cell with a plain (unscaled) secondary track.

    The state is an ``LSTMStateTuple (c, h)``: ``c`` is updated with
    GRU-style reset/update gating, while ``h`` blends the previous output
    with a candidate computed from the inputs alone.
    """

    def __init__(self, num_units, activation=tf.tanh, state_is_tuple=True):
        # num_units: dimensionality of each state component.
        self._num_units = num_units
        self._activation = activation
        self._state_is_tuple = state_is_tuple

    @property
    def output_size(self):
        return self._num_units

    @property
    def state_size(self):
        return tf.nn.rnn_cell.LSTMStateTuple(self._num_units, self._num_units)

    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or "grnnsp_cell"):
            memory, hidden = state
            with tf.variable_scope("gates"):
                # One affine map produces all four gate slices.
                # NOTE(review): r_w is split out but never used below — the
                # gate matrix could be 3*num_units wide, but shrinking it
                # would change variable shapes and break existing checkpoints.
                gates = tf.sigmoid(
                    _linear([inputs, memory], 4 * self._num_units, True, 1.0))
                u_c, u_h, r_c, r_w = array_ops.split(
                    split_dim=1, num_split=4, value=gates)
            with tf.variable_scope("inputs"):
                # GRU-style candidate for the memory track.
                cand_c = tf.tanh(_linear(
                    [inputs, r_c * memory], self._num_units, True,
                    scope="input_c"))
                # Output-track candidate depends on the inputs only.
                cand_h = tf.tanh(_linear(
                    inputs, self._num_units, True, scope="input_h"))
            # Convex blend of previous state and candidates via update gates.
            next_c = u_c * memory + (1 - u_c) * cand_c
            next_h = u_h * hidden + (1 - u_h) * cand_h
            return next_h, tf.nn.rnn_cell.LSTMStateTuple(next_c, next_h)


class Transformer:
    '''
    Stack of Transformer encoder blocks applied to already-embedded inputs.

    xs: tuple of
        x: int32 tensor. (N, T1)
        x_seqlens: int32 tensor. (N,)
        sents1: str tensor. (N,)
    ys: tuple of
        decoder_input: int32 tensor. (N, T2)
        y: int32 tensor. (N, T2)
        y_seqlen: int32 tensor. (N, )
        sents2: str tensor. (N,)
    training: boolean.
    '''
    def __init__(self, d_model=512, maxlen=100, dropout_rate=0.3, num_blocks=6, num_heads=8, d_ff=2048):
        '''
        d_model: model output dim
        maxlen: max seq length supported by the positional encoding
        dropout_rate: fraction of units to DROP (not the keep probability)
        num_blocks: num of stack of All attention block
        num_heads: number of attention heads
        d_ff: the size of middle layer in feed forward
        '''
        self.d_model = d_model
        self.maxlen = maxlen
        self.dropout_rate = dropout_rate
        self.num_blocks = num_blocks
        self.num_heads = num_heads
        self.d_ff = d_ff

    def train(self, enc, dropout_rate, training=True):
        '''
        Run the encoder stack.

        input:
            enc: float tensor, already embedded. (N, T1, d_model)
            dropout_rate: drop probability (same semantics as in
                multihead_attention below)
            training: boolean, forwarded to the attention layers
        Returns
            enc: encoder outputs. (N, T1, d_model)
            enc[:, -1, :]: the last time step. (N, d_model)
        '''
        with tf.variable_scope("encoder"):  # , reuse=tf.AUTO_REUSE):
            # Scale embeddings by sqrt(d_model) as in "Attention Is All You Need".
            enc *= self.d_model**0.5

            enc += positional_encoding(enc, self.maxlen)

            # BUG FIX: `dropout_rate` is a drop probability (it is passed as
            # `dropout_rate=` to multihead_attention below), but
            # tf.nn.dropout takes keep_prob. Previously units were kept with
            # probability `dropout_rate`, i.e. 70% of activations were
            # dropped at the default 0.3.
            # NOTE(review): dropout is applied regardless of `training`;
            # consider disabling it at inference — confirm caller behavior.
            enc = tf.nn.dropout(enc, keep_prob=1.0 - dropout_rate)

            ## Blocks
            for i in range(self.num_blocks):
                with tf.variable_scope("num_blocks_{}".format(i)):
                    # self-attention
                    enc = multihead_attention(queries=enc,
                                              keys=enc,
                                              values=enc,
                                              num_heads=self.num_heads,
                                              dropout_rate=dropout_rate,
                                              training=training,
                                              causality=False)
                    # position-wise feed forward
                    enc = ff(enc, num_units=[self.d_ff, self.d_model])
        return enc, enc[:, -1, :]

