import math

import torch

from torch import nn
from torch.nn import functional as F, Parameter
from torch.autograd.function import Function, InplaceFunction, once_differentiable
from torch._thnn import type2backend


class LSTM(nn.Module):
    """Single-layer LSTM unrolled manually over time.

    Functionally equivalent to a one-layer ``nn.LSTM``, but implemented
    step by step so the recurrence can be inspected or modified.
    """

    def __init__(self, input_size, hidden_size, batch_first=True):
        """Initialize params.

        Args:
            input_size: number of features per input step.
            hidden_size: number of features in the hidden/cell state.
            batch_first: if True, input/output are (batch, seq, feature).
        """
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = 1
        self.batch_first = batch_first

        # One affine map each for input and hidden state, producing all
        # four gate pre-activations at once (chunked apart per step).
        self.input_weights = nn.Linear(input_size, 4 * hidden_size)
        self.hidden_weights = nn.Linear(hidden_size, 4 * hidden_size)

    def forward(self, input, hidden, ctx, ctx_mask=None):
        """Propagate input through the network.

        Args:
            input: (batch, seq, input_size) if batch_first, else
                (seq, batch, input_size).
            hidden: tuple (h_0, c_0), each (batch, hidden_size).
            ctx, ctx_mask: unused here; kept for interface compatibility
                with attention-based callers.

        Returns:
            (output, (h_n, c_n)) where output stacks h_t over all steps.
        """
        def recurrence(step_input, state):
            """One LSTM time step; returns (h_t, c_t)."""
            hx, cx = state  # each: n_b x hidden_dim
            gates = self.input_weights(step_input) + self.hidden_weights(hx)
            ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

            ingate = torch.sigmoid(ingate)
            forgetgate = torch.sigmoid(forgetgate)
            cellgate = torch.tanh(cellgate)
            outgate = torch.sigmoid(outgate)

            cy = (forgetgate * cx) + (ingate * cellgate)
            hy = outgate * torch.tanh(cy)  # n_b x hidden_dim

            return hy, cy

        if self.batch_first:
            input = input.transpose(0, 1)  # -> (seq, batch, feature)

        output = []
        for i in range(input.size(0)):
            hidden = recurrence(input[i], hidden)
            # recurrence always returns an (h, c) tuple; collect h_t.
            output.append(hidden[0])

        # Stack per-step hidden states into (seq, batch, hidden); this is
        # the idiomatic form of the old cat(...).view(...) construction.
        output = torch.stack(output, 0)

        if self.batch_first:
            output = output.transpose(0, 1)

        return output, hidden


class LSTMPeepHole(nn.Module):
    """Single-layer LSTM with peephole connections from the cell state."""

    def __init__(self, input_size, hidden_size, batch_first=True):
        """Initialize params."""
        super(LSTMPeepHole, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = 1
        self.batch_first = batch_first

        # Gate pre-activations: input path is bias-free, hidden path
        # carries the bias; peephole terms are linear maps of the cell.
        self.input_weights = nn.Linear(input_size, 4 * hidden_size, bias=False)
        self.hidden_weights = nn.Linear(hidden_size, 4 * hidden_size, bias=True)
        self.cx_weights = nn.Linear(hidden_size, 2*hidden_size, bias=False)
        self.cy_weights = nn.Linear(hidden_size, hidden_size, bias=False)

    def hard_sigmoid(self, x):
        """
        Computes element-wise hard sigmoid of x, i.e. 0.2*x + 0.5 clipped
        to [0, 1] via two negated thresholds.
        See e.g. https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py#L279
        """
        scaled = (0.2 * x) + 0.5
        clipped_high = F.threshold(-scaled, -1, -1)
        return F.threshold(-clipped_high, 0, 0)

    def forward(self, input, hidden, ctx, ctx_mask=None):
        """Propagate input through the network; ctx/ctx_mask are unused."""
        def step(x_t, state):
            """One peephole-LSTM time step; returns (h_t, c_t)."""
            h_prev, c_prev = state  # each: n_b x hidden_dim
            preact = self.input_weights(x_t) + self.hidden_weights(h_prev)
            i_gate, f_gate, g_cell, o_gate = preact.chunk(4, 1)
            i_peep, f_peep = self.cx_weights(c_prev).chunk(2, 1)

            i_gate = self.hard_sigmoid(i_gate + i_peep)
            f_gate = self.hard_sigmoid(f_gate + f_peep)
            g_cell = torch.tanh(g_cell)

            c_t = (f_gate * c_prev) + (i_gate * g_cell)
            # The output gate peeks at the *new* cell state.
            o_gate = self.hard_sigmoid(o_gate + self.cy_weights(c_t))
            h_t = o_gate * torch.tanh(c_t)  # n_b x hidden_dim
            return h_t, c_t

        if self.batch_first:
            input = input.transpose(0, 1)

        outputs = []
        for t in range(input.size(0)):
            hidden = step(input[t], hidden)
            outputs.append(hidden[0] if isinstance(hidden, tuple) else hidden)

        output = torch.cat(outputs, 0).view(input.size(0), *outputs[0].size())

        if self.batch_first:
            output = output.transpose(0, 1)

        return output, hidden


class LSTMd(nn.Module):
    """LSTM variant with an extra recurrent "T" state that feeds the gates.

    Each step also emits T_t = tanh(W_T h_t), which modulates the gates at
    the next step; the full T sequence is returned alongside the outputs.
    """

    def __init__(self, input_size, hidden_size, device=None, batch_first=True,):
        """Initialize params.

        Args:
            input_size: number of features per input step.
            hidden_size: size of the hidden, cell and T states.
            device: stored but not used in this class.
            batch_first: if True, input/output are (batch, seq, feature).
        """
        super(LSTMd, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = 1
        self.batch_first = batch_first
        self.device = device

        self.input_weights = nn.Linear(input_size, 4 * hidden_size, bias=False)
        self.hidden_weights = nn.Linear(hidden_size, 4 * hidden_size, bias=False)
        self.t_weights = nn.Linear(hidden_size, 3 * hidden_size, bias=False)
        self.cy_weights = nn.Linear(hidden_size, hidden_size, bias=False)
        # NOTE(review): "T_weihts" is a typo for "T_weights", but renaming
        # it would change state_dict keys and break saved checkpoints.
        self.T_weihts = nn.Linear(hidden_size, hidden_size)
        # Shared gate bias (order: in, forget, cell, out), initialized below.
        self.bias = Parameter(torch.Tensor(4*hidden_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize the gate bias and the recurrent weights.

        Bug fix: previously only the forget-gate slice of ``self.bias`` was
        set, leaving the other three gate biases as uninitialized memory.
        Zero the whole vector first, then apply the standard forget-gate
        bias of 1 ("remember by default").
        """
        self.bias.data.zero_()
        self.bias.data[self.hidden_size:self.hidden_size*2] = torch.ones(self.hidden_size)
        for weight in self.hidden_weights.parameters():
            nn.init.orthogonal_(weight)

    def hard_sigmoid(self, x):
        """
        Computes element-wise hard sigmoid of x, i.e. 0.2*x + 0.5 clipped
        to [0, 1] via two negated thresholds.
        See e.g. https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py#L279
        """
        x = (0.2 * x) + 0.5
        x = F.threshold(-x, -1, -1)
        x = F.threshold(-x, 0, 0)
        return x

    def init_hidden(self, input):
        """Return zero (h, c, T) states, batch size taken from ``input``."""
        hx = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False)
        return (hx, hx, hx)

    def forward(self, input, hidden=None):
        """Propagate input through the network.

        Args:
            input: (batch, seq, feature) if batch_first, else
                (seq, batch, feature).
            hidden: optional (h_0, c_0, T_0) tuple; zeros when None.

        Returns:
            (output, (h_n, c_n, T_n), T): per-step hidden states, the final
            state tuple, and the per-step T states.
        """
        def recurrence(step_input, state):
            """One time step; returns (h_t, c_t, T_t)."""
            hx, cx, Tx = state  # each: n_b x hidden_dim
            gates = self.input_weights(step_input) + \
                    self.hidden_weights(hx) + self.bias
            ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
            ingate_t, forget_t, cellgate_t = self.t_weights(Tx).chunk(3, 1)

            ingate = self.hard_sigmoid(ingate + ingate_t)
            forgetgate = self.hard_sigmoid(forgetgate + forget_t)
            cellgate = torch.tanh(cellgate + cellgate_t)

            cy = (forgetgate * cx) + (ingate * cellgate)
            # The output gate peeks at the freshly computed cell state.
            outgate = self.hard_sigmoid(outgate + self.cy_weights(cy))
            hy = outgate * torch.tanh(cy)  # n_b x hidden_dim
            Ty = torch.tanh(self.T_weihts(hy))
            return hy, cy, Ty

        if self.batch_first:
            input = input.transpose(0, 1)  # -> (seq, batch, feature)

        if hidden is None:
            # Hoisted from the per-step closure: only the first step ever
            # saw None, so initializing once here is equivalent.
            hidden = self.init_hidden(input[0])

        output = []
        T = []
        for i in range(input.size(0)):
            hidden = recurrence(input[i], hidden)
            output.append(hidden[0])
            T.append(hidden[-1])

        # Stack per-step states into (seq, batch, hidden).
        output = torch.stack(output, 0)
        T = torch.stack(T, 0)

        if self.batch_first:
            output = output.transpose(0, 1)
            T = T.transpose(0, 1)

        return output, hidden, T


class LSTMFused(Function):
    """Autograd Function wrapping the legacy fused LSTM cell C kernel.

    Computes one LSTM step (h_t, c_t) from precomputed input and hidden
    gate pre-activations and the previous cell state.

    NOTE(review): depends on ``torch._thnn.type2backend``, a private API
    that has been removed from modern PyTorch; this class only works on
    old (~0.4-era) releases.
    """

    @staticmethod
    def forward(ctx, input_gate, hidden_gate, cx, ibias=None, hbias=None):
        # Look up the dtype-specific THNN backend for this tensor type.
        ctx.backend = type2backend[input_gate.type()]
        # Empty output buffers for the kernel to resize and fill.
        hy = input_gate.new()
        cy = input_gate.new()

        ctx.has_bias = False
        if ibias is not None:
            ctx.has_bias = True
            # Promote 1-D biases to 2-D (1 x N) — presumably so the kernel
            # can broadcast them over the batch dimension; verify against
            # the THNN LSTMFused kernel.
            if ibias.dim() == 1:
                ibias = ibias.unsqueeze(0)
            if hbias.dim() == 1:
                hbias = hbias.unsqueeze(0)

        # input_gate gets overwritten with some intermediate values to use in backwards
        ctx.backend.LSTMFused_updateOutput(
            ctx.backend.library_state,
            input_gate, hidden_gate,
            ibias, hbias,
            cx, hy, cy)

        # Remember the gate-buffer shape so backward can allocate the
        # gate-gradient buffer without re-reading hidden_gate.
        ctx.hgate_size = hidden_gate.size()
        # input_gate now holds the kernel's stashed intermediates (see above).
        ctx.save_for_backward(input_gate, cx, cy)

        return hy, cy

    @staticmethod
    @once_differentiable
    def backward(ctx, *gradOutput):
        # gradOutput is (grad_hy, grad_cy), matching forward's two outputs.
        ctx.backend = type2backend[gradOutput[0].type()]
        gradInputCx = gradOutput[0].new()
        gradInGates = gradOutput[0].new(*ctx.hgate_size)

        # saved_tens is the (overwritten) input_gate buffer from forward.
        saved_tens, cx, cy = ctx.saved_tensors
        ctx.backend.LSTMFused_updateGradInput(
            ctx.backend.library_state,
            saved_tens, gradInGates, cx, cy,
            gradOutput[0], gradOutput[1], gradInputCx)

        gb1 = gb2 = None
        if ctx.has_bias:
            # Both bias gradients are the gate gradients summed over the
            # batch dimension; the two biases get identical gradients.
            gb1 = gradInGates.sum(0, keepdim=False)
            gb2 = gradInGates.sum(0, keepdim=False)

        # gradInGates is returned for both input_gate and hidden_gate —
        # NOTE(review): consistent with the two pre-activations entering
        # the cell additively, as in the Python LSTMs in this file.
        return gradInGates, gradInGates, gradInputCx, gb1, gb2