import numpy as np

import torch as T
from torch.autograd import Variable

class LSTMGen(T.nn.Module):
    """3-layer LSTM classifier.

    Consumes an embedded sequence of shape (batch, seqLen, embDim) and
    produces per-class log-probabilities of shape (batch, outDim), computed
    from the final layer's last hidden state.
    """

    def __init__(self, embDim, hidDim, seqLen, outDim):
        """
        Args:
            embDim: size of each input embedding vector.
            hidDim: LSTM hidden-state size.
            seqLen: expected sequence length (stored for reference only;
                the forward pass does not depend on it).
            outDim: number of output classes.
        """
        super(LSTMGen, self).__init__()

        self.embDim = embDim
        self.hidDim = hidDim
        self.seqLen = seqLen
        self.outDim = outDim

        self.lstmLayer = T.nn.LSTM(
            input_size=embDim,
            hidden_size=hidDim,
            batch_first=True,
            num_layers=3,
            dropout=0.5
        )
        self.batchNorm = T.nn.BatchNorm1d(hidDim)
        # Register the activation once instead of constructing a fresh
        # T.nn.ReLU() on every forward call.
        self.relu = T.nn.ReLU()
        self.hiddenLayer = T.nn.Linear(hidDim, outDim)
        self.softmax = T.nn.LogSoftmax(dim=1)

    def forward(self, seqIn):
        """Map seqIn of shape (batch, seqLen, embDim) to (batch, outDim) log-probs."""
        # h_n has shape (num_layers, batch, hidDim); take the last layer's
        # final hidden state. Indexing with -1 (instead of the hard-coded 2)
        # stays correct if num_layers is ever changed.
        _, (hn, _) = self.lstmLayer(seqIn)
        hn = self.relu(self.batchNorm(hn[-1]))
        return self.softmax(self.hiddenLayer(hn))

class LSTM():
    """Training/inference wrapper around LSTMGen.

    Bundles the model with its NLL loss and Adam optimizer, and exposes
    checkpointing, a single-batch train step, batch evaluation, and
    learning-rate scheduling.
    """

    def __init__(self, embDim, hidDim, seqLen, outDim):
        self.embDim = embDim
        self.hidDim = hidDim
        self.seqLen = seqLen
        self.outDim = outDim

        self.lr = 0.0005
        self.model = LSTMGen(embDim, hidDim, seqLen, outDim)
        # NLLLoss pairs with the model's LogSoftmax output.
        self.lossFunc = T.nn.NLLLoss()
        self.optim = T.optim.Adam(self.model.parameters(), lr=self.lr)

    def load(self, path):
        """Restore model, optimizer and lr from a checkpoint; return its epoch."""
        state = T.load(path)
        self.model.load_state_dict(state['net'])
        self.optim.load_state_dict(state['optimizer'])
        self.lr = state['lr']
        return state["epoch"]

    def save(self, path, epoch):
        """Write a checkpoint to `path % epoch` (path must contain e.g. '%d')."""
        state = {
            'net': self.model.state_dict(),
            'optimizer': self.optim.state_dict(),
            'epoch': epoch,
            'lr': self.lr
        }
        T.save(state, path % epoch)

    def trainStep(self, X, Y):
        """Run one optimization step on a single batch.

        Args:
            X: input batch, array-like of shape (batch, seqLen, embDim).
            Y: integer class labels, array-like of shape (batch,).

        Returns:
            The loss tensor (still attached to the autograd graph; call
            .item() if only the scalar value is needed).
        """
        # as_tensor avoids a copy (and a warning) when X/Y are already tensors.
        X = T.as_tensor(X)
        Y = T.as_tensor(Y)
        self.optim.zero_grad()

        # forward
        Y_ = self.model(X)
        loss = self.lossFunc(Y_, Y)

        # backward
        loss.backward()
        self.optim.step()

        return loss

    def eval(self, X):
        """Return class probabilities (exp of the log-softmax) as a numpy array."""
        # Inference only: no_grad skips building the autograd graph, which
        # the original version paid for and then threw away via detach().
        with T.no_grad():
            X = T.as_tensor(X)
            Y_ = self.model(X).exp()
        return Y_.numpy()

    def evalMode(self):
        """Switch the model to eval mode (disables dropout / BN batch stats)."""
        self.model.eval()

    def mulLR(self, rate):
        """Scale the learning rate by `rate` and push it into the optimizer."""
        self.lr *= rate
        print("lr = %f" % self.lr)
        for param_group in self.optim.param_groups:
            param_group['lr'] = self.lr