# models: RNN, LSTM, GRU, MLP, LogisticRegression, LookupTable
import numpy
import theano
import theano.tensor as T
from initialization import constant_weight, uniform_weight, ortho_weight, norm_weight
from theano.tensor.nnet import categorical_crossentropy
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import logging
import sys
import os

logger = logging.getLogger(__name__)

class LookupTable(object):
    """Embedding table: maps integer indices to learned dense vectors."""

    def __init__(self, vocab_size, embedding_size, name='embeddings'):
        """
        :param vocab_size: number of rows (distinct symbols)
        :param embedding_size: dimensionality of each embedding vector
        :param name: name given to the shared weight variable
        """
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        # one row of W per vocabulary entry
        self.W = uniform_weight(shape=(vocab_size, embedding_size), name=name)
        # parameters of the model
        self.params = [self.W]

    def apply(self, indices):
        """Look up embeddings for `indices`.

        Returns a tensor with the same leading shape as `indices` plus a
        trailing embedding axis.
        """
        out_shape = [indices.shape[axis] for axis in range(indices.ndim)]
        out_shape.append(self.embedding_size)
        return self.W[indices.flatten()].reshape(out_shape)


class LogisticRegression(object):

    """Multi-class Logistic Regression (softmax output) layer.

    Supports 2D input (batch, n_in) and 3D input (steps, batch, n_in);
    for 3D input the softmax is taken over the last axis.
    """

    def __init__(self, input, n_in, n_out, name='LR'):
        """
        :param input: symbolic tensor of shape (..., n_in)
        :param n_in: dimensionality of the input features
        :param n_out: number of output classes
        :param name: prefix for the parameter names
        """
        # initialize the weights W as a matrix of shape (n_in, n_out)
        self.W = uniform_weight(shape=(n_in, n_out), name=_p(name, 'W'))

        # initialize the biases b as a vector of n_out 0s
        self.b = constant_weight(shape=(n_out, ), name=_p(name, 'b'))

        # compute vector of class-membership probabilities in symbolic form
        energy = T.dot(input, self.W) + self.b

        if energy.ndim == 3:
            # T.nnet.softmax only handles matrices, so compute a
            # numerically-stable softmax over the last axis by hand.
            energy_exp = T.exp(energy - T.max(energy, 2, keepdims=True))
            pmf = energy_exp / energy_exp.sum(2, keepdims=True)
            self.p_y_given_x = pmf
        else:
            self.p_y_given_x = T.nnet.softmax(energy)

        # parameters of the model
        self.params = [self.W, self.b]

    def cost(self, targets, mask=None):
        """Summed categorical cross-entropy against `targets`.

        :param targets: integer class targets (flattened internally)
        :param mask: optional mask for 3D predictions; padded positions
            (mask == 0) contribute nothing to the cost.  The original
            crashed when 3D predictions were used without a mask.
        """
        prediction = self.p_y_given_x
        if prediction.ndim == 3:
            prediction_flat = prediction.reshape(((prediction.shape[0] *
                                                   prediction.shape[1]),
                                                  prediction.shape[2]), ndim=2)
            ce = categorical_crossentropy(prediction_flat, targets.flatten())
            if mask is not None:
                # zero out the loss of padded timesteps
                ce = ce * mask.flatten()
            return T.sum(ce)

        ce = categorical_crossentropy(prediction, targets.flatten())
        return T.sum(ce)

    def result(self, y):
        """Return (gold labels, argmax predictions), both flattened."""
        y = y.flatten()
        y_pred = T.argmax(self.p_y_given_x, axis=-1)
        y_pred = y_pred.flatten()
        return y, y_pred

    def beam(self):
        """Return class probabilities flattened to 2D for beam search."""
        prediction = self.p_y_given_x
        if prediction.ndim == 3:
            prediction_flat = prediction.reshape(((prediction.shape[0] *
                                                   prediction.shape[1]),
                                                  prediction.shape[2]), ndim=2)
            return prediction_flat
        return prediction

class HiddenLayer(object):
    """Fully-connected layer with a ReLU activation."""

    def __init__(self, n_in, n_out, name='HL'):
        """
        :param n_in: input dimensionality
        :param n_out: number of hidden units
        :param name: prefix for the parameter names
        """
        # affine-transform weights and bias
        self.W = uniform_weight(shape=(n_in, n_out), name=_p(name, 'W'))
        self.b = constant_weight(shape=(n_out, ), name=_p(name, 'b'))
        # parameters of the model
        self.params = [self.W, self.b]

    def apply(self, input):
        """Return relu(input . W + b); also stored on ``self.output``."""
        pre_activation = T.dot(input, self.W) + self.b
        self.output = T.nnet.relu(pre_activation)
        return self.output


class GRU(object):
    """Gated Recurrent Unit layer (update/reset gates) with input masking."""

    def __init__(self, n_in, n_hids, name='GRU'):
        """
        :param n_in: input dimensionality
        :param n_hids: number of hidden units
        :param name: prefix for the parameter names
        """
        # NOTE: the original body used tab indentation here (a TabError
        # under Python 3); normalized to spaces.
        self.n_in = n_in
        self.n_hids = n_hids
        self.pname = name
        self._init_params()

    def _init_params(self):
        # input-to-hidden weights use scaled-normal init, hidden-to-hidden
        # use orthogonal init (the usual recipe for recurrent nets)
        shape_xh = (self.n_in, self.n_hids)
        shape_hh = (self.n_hids, self.n_hids)

        self.W_xz = norm_weight(shape=shape_xh, name=_p(self.pname, 'W_xz'))
        self.W_xr = norm_weight(shape=shape_xh, name=_p(self.pname, 'W_xr'))
        self.W_xh = norm_weight(shape=shape_xh, name=_p(self.pname, 'W_xh'))
        self.W_hz = ortho_weight(shape=shape_hh, name=_p(self.pname, 'W_hz'))
        self.W_hr = ortho_weight(shape=shape_hh, name=_p(self.pname, 'W_hr'))
        self.W_hh = ortho_weight(shape=shape_hh, name=_p(self.pname, 'W_hh'))
        self.b_z = constant_weight(shape=(self.n_hids, ), name=_p(self.pname, 'b_z'))
        self.b_r = constant_weight(shape=(self.n_hids, ), name=_p(self.pname, 'b_r'))
        self.b_h = constant_weight(shape=(self.n_hids, ), name=_p(self.pname, 'b_h'))

        self.params = [self.W_xz, self.W_xr, self.W_xh,
                       self.W_hz, self.W_hr, self.W_hh,
                       self.b_z, self.b_r, self.b_h]

    def _step(self, x_t, x_m, h_tm1):
        '''
        One GRU step.

        x_t: input at time t
        x_m: mask of x_t (1 = real token, 0 = padding)
        h_tm1: previous state
        '''
        # update gate
        z_t = T.nnet.sigmoid(T.dot(x_t, self.W_xz) +
                             T.dot(h_tm1, self.W_hz) + self.b_z)

        # reset gate
        r_t = T.nnet.sigmoid(T.dot(x_t, self.W_xr) +
                             T.dot(h_tm1, self.W_hr) + self.b_r)

        # candidate state
        can_h_t = T.tanh(T.dot(x_t, self.W_xh) +
                         r_t * T.dot(h_tm1, self.W_hh) +
                         self.b_h)
        h_t = (1 - z_t) * h_tm1 + z_t * can_h_t

        # carry the previous state through padded positions
        h_t = x_m[:, None] * h_t + (1. - x_m[:, None]) * h_tm1

        return h_t

    def apply(self, state_below, mask_below, init_state=None):
        """Run the GRU over a (n_steps, batch, n_in) input.

        :param state_below: 3D input tensor
        :param mask_below: (n_steps, batch) mask of valid positions
        :param init_state: optional initial hidden state; zeros otherwise
        :returns: (n_steps, batch, n_hids) hidden states
        """
        if state_below.ndim == 3:
            batch_size = state_below.shape[1]
            n_steps = state_below.shape[0]
        else:
            raise NotImplementedError

        if init_state is None:
            init_state = T.alloc(numpy.float32(0.), batch_size, self.n_hids)

        rval, updates = theano.scan(self._step,
                                    sequences=[state_below, mask_below],
                                    outputs_info=[init_state],
                                    n_steps=n_steps)
        self.output = rval
        return self.output

    def merge_out(self, state_below, mask_below):
        """Concatenate input and hidden states, then apply a maxout projection."""
        hiddens = self.apply(state_below, mask_below)
        n_in = self.n_in + self.n_hids
        n_out = self.n_hids * 2  # for maxout (pieces of 2)
        combine = T.concatenate([state_below, hiddens], axis=2)

        self.W_m = norm_weight(shape=(n_in, n_out), name=_p(self.pname, 'W_m'))
        self.b_m = constant_weight(shape=(n_out,), name=_p(self.pname, 'b_m'))

        self.params += [self.W_m, self.b_m]

        # maxout: pair up adjacent units and keep the max of each pair.
        # Use floor division so the reshape stays integral under Python 3.
        merge_out = theano.dot(combine, self.W_m) + self.b_m
        merge_max_out = merge_out.reshape((merge_out.shape[0],
                                           merge_out.shape[1],
                                           merge_out.shape[2] // 2,
                                           2), ndim=4).max(axis=3)

        return merge_max_out * mask_below[:, :, None]


class LSTM(object):
    """Long Short-Term Memory recurrent layer with input masking."""

    def __init__(self, n_in, n_hids, name='LSTM'):
        """
        :param n_in: input dimensionality
        :param n_hids: number of hidden units
        :param name: prefix for the parameter names
        """
        self.n_in = n_in
        self.n_hids = n_hids
        self.pname = name
        self._init_params()

    def _init_params(self):
        # input-to-hidden weights use scaled-normal init, hidden-to-hidden
        # use orthogonal init (same convention as GRU)
        shape_xh = (self.n_in, self.n_hids)
        shape_hh = (self.n_hids, self.n_hids)

        self.W_xi = norm_weight(shape=shape_xh, name=_p(self.pname, 'W_xi'))
        self.W_xf = norm_weight(shape=shape_xh, name=_p(self.pname, 'W_xf'))
        self.W_xc = norm_weight(shape=shape_xh, name=_p(self.pname, 'W_xc'))
        self.W_xo = norm_weight(shape=shape_xh, name=_p(self.pname, 'W_xo'))
        self.W_hi = ortho_weight(shape=shape_hh, name=_p(self.pname, 'W_hi'))
        self.W_hf = ortho_weight(shape=shape_hh, name=_p(self.pname, 'W_hf'))
        self.W_hc = ortho_weight(shape=shape_hh, name=_p(self.pname, 'W_hc'))
        self.W_ho = ortho_weight(shape=shape_hh, name=_p(self.pname, 'W_ho'))
        self.b_i = constant_weight(shape=(self.n_hids, ), name=_p(self.pname, 'b_i'))
        self.b_f = constant_weight(shape=(self.n_hids, ), name=_p(self.pname, 'b_f'))
        self.b_c = constant_weight(shape=(self.n_hids, ), name=_p(self.pname, 'b_c'))
        self.b_o = constant_weight(shape=(self.n_hids, ), name=_p(self.pname, 'b_o'))

        self.params = [self.W_xi, self.W_xf, self.W_xc, self.W_xo,
                       self.W_hi, self.W_hf, self.W_hc, self.W_ho,
                       self.b_i, self.b_f, self.b_c, self.b_o]

    def _step(self, x_t, x_m, h_tm1, c_tm1):
        '''
        One LSTM step.

        x_t: input at time t
        x_m: mask of x_t (1 = real token, 0 = padding)
        h_tm1: previous state
        c_tm1: previous memory cell
        '''
        i_t = T.nnet.sigmoid(T.dot(x_t, self.W_xi) +
                             T.dot(h_tm1, self.W_hi) + self.b_i)

        f_t = T.nnet.sigmoid(T.dot(x_t, self.W_xf) +
                             T.dot(h_tm1, self.W_hf) + self.b_f)

        o_t = T.nnet.sigmoid(T.dot(x_t, self.W_xo) +
                             T.dot(h_tm1, self.W_ho) + self.b_o)

        c_t = T.tanh(T.dot(x_t, self.W_xc) +
                     T.dot(h_tm1, self.W_hc) + self.b_c)
        c_t = f_t * c_tm1 + i_t * c_t
        # carry the previous cell/state through padded positions
        c_t = x_m[:, None] * c_t + (1. - x_m)[:, None] * c_tm1

        h_t = o_t * T.tanh(c_t)
        h_t = x_m[:, None] * h_t + (1. - x_m)[:, None] * h_tm1

        return h_t, c_t

    def apply(self, state_below, mask_below, init_state=None):
        """Run the LSTM over a (n_steps, batch, n_in) input.

        :param state_below: 3D input tensor
        :param mask_below: (n_steps, batch) mask of valid positions
        :param init_state: optional initial hidden state; the memory cell
            always starts at zero
        :returns: (n_steps, batch, n_hids) hidden states
        """
        if state_below.ndim == 3:
            batch_size = state_below.shape[1]
            n_steps = state_below.shape[0]
        else:
            raise NotImplementedError

        # BUGFIX: the original only defined the initial states inside the
        # ``init_state is None`` branch (NameError when a state was passed)
        # and allocated float64 zeros, unlike the float32 zeros in GRU.apply.
        if init_state is None:
            init_state = T.alloc(numpy.float32(0.), batch_size, self.n_hids)
        init_state_c = T.alloc(numpy.float32(0.), batch_size, self.n_hids)

        rval, updates = theano.scan(self._step,
                                    sequences=[state_below, mask_below],
                                    outputs_info=[init_state, init_state_c],
                                    n_steps=n_steps)
        # rval = [hidden states, memory cells]; expose the hidden states
        self.output = rval[0]
        return self.output

    def merge_out(self, state_below, mask_below):
        """Concatenate input and hidden states, then apply a maxout projection."""
        hiddens = self.apply(state_below, mask_below)
        n_in = self.n_in + self.n_hids
        n_out = self.n_hids * 2  # for maxout (pieces of 2)
        combine = T.concatenate([state_below, hiddens], axis=2)

        self.W_m = norm_weight(shape=(n_in, n_out), name=_p(self.pname, 'W_m'))
        self.b_m = constant_weight(shape=(n_out,), name=_p(self.pname, 'b_m'))

        self.params += [self.W_m, self.b_m]

        # maxout: pair up adjacent units and keep the max of each pair.
        # Use floor division so the reshape stays integral under Python 3.
        merge_out = theano.dot(combine, self.W_m) + self.b_m
        merge_max_out = merge_out.reshape((merge_out.shape[0],
                                           merge_out.shape[1],
                                           merge_out.shape[2] // 2,
                                           2), ndim=4).max(axis=3)

        return merge_max_out * mask_below[:, :, None]


class Parser(object):
    """Feed-forward parser model.

    Embeds word / POS / non-terminal feature indices, feeds the
    concatenation through a ReLU hidden layer, and predicts the next
    parser operation with a softmax output layer.
    """

    def __init__(self, word_size, word_nembed, pos_size, pos_nembed, nterm_size, nterm_nembed, nhids, op_size, dropout, saveto, **kwargs):
        # per-feature embedding sizes
        self.n_word_in = word_nembed
        self.n_pos_in = pos_nembed
        self.n_nterm_in = nterm_nembed
        # apply() concatenates 6 word, 6 POS and 3 non-terminal features
        self.n_in = word_nembed * 6 + pos_nembed * 6 + nterm_nembed * 3

        self.n_hids = nhids

        # vocabulary sizes of the three lookup tables
        self.word_size = word_size
        self.pos_size = pos_size
        self.nterm_size = nterm_size

        self.dropout = dropout  # keep probability; 1.0 disables dropout
        self.path = saveto      # checkpoint file path
        self.op_size = op_size  # number of parser operations (output classes)

        self.params = []
        self.layers = []

    def apply(self, word, pos, nterm, op, use_noise=1):
        """Build the symbolic training/prediction graph.

        :param word, pos, nterm: integer matrices of feature indices
        :param op: gold operation ids (targets)
        :param use_noise: 1 enables dropout noise (training), 0 disables it
        """
        word_table = LookupTable(self.word_size, self.n_word_in)
        pos_table = LookupTable(self.pos_size, self.n_pos_in)
        nterm_table = LookupTable(self.nterm_size, self.n_nterm_in)

        # flatten each (batch, n_features, embed) lookup to (batch, n_features*embed)
        word_mat = word_table.apply(word)
        word_mat = word_mat.reshape((word_mat.shape[0], word_mat.shape[1] * word_mat.shape[2]))
        pos_mat = pos_table.apply(pos)
        pos_mat = pos_mat.reshape((pos_mat.shape[0], pos_mat.shape[1] * pos_mat.shape[2]))

        nterm_mat = nterm_table.apply(nterm)
        nterm_mat = nterm_mat.reshape((nterm_mat.shape[0], nterm_mat.shape[1] * nterm_mat.shape[2]))

        self.layers.append(word_table)
        self.layers.append(pos_table)
        self.layers.append(nterm_table)

        self.merge_in = T.concatenate([word_mat, pos_mat, nterm_mat], axis=-1)

        hidden = HiddenLayer(self.n_in, self.n_hids)  # alternatives: GRU, LSTM

        hiddens = hidden.apply(self.merge_in)

        self.layers.append(hidden)

        # apply dropout
        if self.dropout < 1.0:
            # dropout is applied to the output of maxout in ghog
            logger.info('Apply dropout with p = {}'.format(self.dropout))
            hiddens = Dropout(hiddens, use_noise, self.dropout)

        logistic_layer = LogisticRegression(hiddens, self.n_hids, self.op_size)
        self.layers.append(logistic_layer)

        self.cost = logistic_layer.cost(op)
        self.y, self.y_pred = logistic_layer.result(op)
        self.softmax_y = logistic_layer.beam()

        for layer in self.layers:
            self.params.extend(layer.params)

        # regularization terms over all parameters
        self.L1 = sum(T.sum(abs(param)) for param in self.params)
        self.L2 = sum(T.sum(param ** 2) for param in self.params)

    def save(self, num=""):
        """Save all parameter values to ``num + self.path``.

        BUGFIX: the original opened the file in text mode "w" (truncating
        it, then never using the handle) and passed the path string to
        numpy.savez, which appends ".npz" when the path lacks it — so
        load() could end up reading the truncated empty file.  Passing an
        open binary handle makes numpy write to exactly this path.
        """
        with open(num + self.path, "wb") as fout:
            val = dict((str(index), param.get_value())
                       for index, param in enumerate(self.params))
            numpy.savez(fout, **val)

    def load(self, num=""):
        """Restore parameter values from ``num + self.path`` if it exists."""
        if os.path.isfile(num + self.path):
            # informational message, not an error condition
            logger.info("load params {}".format(num + self.path))
            val = numpy.load(num + self.path)
            for index, param in enumerate(self.params):
                index = str(index)
                if param.get_value().shape != val[index].shape:
                    raise Exception("loading params shape mismatch")
                else:
                    param.set_value(val[index], borrow=True)
        else:
            logger.error("file {} does not exist".format(self.path))

# dropout
def Dropout(state_before, use_noise, dropout):
    """Dropout with keep-probability ``dropout``.

    At train time (use_noise != 0) units are kept with probability
    ``dropout``; at test time activations are scaled by the same keep
    probability so expected values match.

    BUGFIX: the original scaled by ``(1. - dropout)`` at test time, which
    only matches the training expectation when dropout == 0.5 (the binomial
    keeps units with probability ``dropout``, so the expected train-time
    activation is state * dropout).
    """
    trng = RandomStreams(1234)
    proj = T.switch(use_noise,
                    state_before * trng.binomial(state_before.shape, p=dropout, n=1,
                                                 dtype=state_before.dtype),
                    state_before * dropout)
    return proj

# make prefix-appended name
def _p(pp, name):
    return '%s_%s' % (pp, name)

