import numpy as np

import torch as T
from torch.autograd import Variable

class ReqModel(T.nn.Module):
    """Request (encoder) side: runs an embedded token sequence through a
    single-layer, batch-first LSTM.

    Args:
        embDim: embedding dimension of the input sequence.
        hidDim: LSTM hidden size.
        outDim: accepted for signature parity with ResModel; unused here.
    """

    def __init__(self, embDim, hidDim, outDim):
        super(ReqModel, self).__init__()

        self.hidDim = hidDim

        self.lstmLayer = T.nn.LSTM(
            input_size=embDim,
            hidden_size=hidDim,
            batch_first=True,
            num_layers=1
        )

    def forward(self, reqEmb):
        """Encode reqEmb (batch, seq, embDim) -> (outputs, (h, c)).

        Returns:
            reqOut: per-step hidden states, shape (batch, seq, hidDim).
            hc: final (hidden, cell) state tuple, each (1, batch, hidDim).
        """
        # Small random initial states (uniform-ish in [-0.15, 0.15)) are used
        # to introduce randomness; the original comment noted this may later
        # be replaced by a context-derived initial state.
        # BUG FIX: removed the dead `if True: ... else: ...` toggle — the
        # else branch (zero initial state) was unreachable.
        h = T.randn(1, reqEmb.shape[0], self.hidDim) * 0.3 - 0.15
        c = T.randn(1, reqEmb.shape[0], self.hidDim) * 0.3 - 0.15
        reqOut, hc = self.lstmLayer(reqEmb, (h, c))
        return reqOut, hc

def length(x):
    """Return the Euclidean (L2) norm of tensor *x* as a 0-dim tensor."""
    squared_total = x.pow(2).sum()
    return squared_total.sqrt()

class ResModel(T.nn.Module):
    """Response (decoder) side: LSTM plus dot-product attention over the
    encoder outputs, followed by a two-layer classifier head emitting
    log-probabilities over the vocabulary.

    Args:
        embDim: embedding dimension of the decoder input tokens.
        hidDim: LSTM hidden size (must match the encoder's hidDim, since
            attention scores are dot products of decoder and encoder states).
        outDim: vocabulary size of the output distribution.
    """

    def __init__(self, embDim, hidDim, outDim):
        super(ResModel, self).__init__()

        self.lstmLayer = T.nn.LSTM(
            input_size=embDim,
            hidden_size=hidDim,
            batch_first=True,
            num_layers=1
        )
        self.ratio = T.nn.Softmax(dim=2)
        # [context ; decoder state] is concatenated, hence hidDim*2 inputs.
        self.dense1 = T.nn.Linear(hidDim*2, 256)
        self.dense2 = T.nn.Linear(256, outDim)

    def forward(self, resEmb, reqOut, lasthc):
        """Decode one (sub)sequence.

        Args:
            resEmb: embedded decoder input, (batch, resLen, embDim).
            reqOut: encoder outputs, (batch, reqLen, hidDim).
            lasthc: (h, c) carried over from the encoder or previous step.

        Returns:
            d2out: log-probabilities, (batch, resLen, outDim).
            hc: updated (h, c) state.
            resOut: raw decoder LSTM outputs, (batch, resLen, hidDim).
            attention: attention weights, (batch, resLen, reqLen).
        """
        resOut, hc = self.lstmLayer(resEmb, lasthc)
        reqOutT = reqOut.transpose(1, 2)
        # Dot-product attention: softmax over the request positions (dim=2).
        attention = self.ratio(T.matmul(resOut, reqOutT))
        context = T.matmul(attention, reqOut)
        combine = T.cat([context, resOut], dim=2)
        # FIX: the original constructed fresh T.nn.Tanh() / T.nn.LogSoftmax()
        # modules on every forward call; the functional forms below compute
        # identical values without per-call module allocation.
        d1out = T.tanh(self.dense1(combine))
        d2out = T.nn.functional.log_softmax(self.dense2(d1out), dim=2)
        return d2out, hc, resOut, attention

from itertools import chain

class LSTM():
    """Seq2seq wrapper tying together the shared embedding, the ReqModel
    encoder and the ResModel decoder, with training, greedy decoding, and
    checkpoint save/load.

    NOTE(review): token id conventions inferred from the code below —
    0 is padding (padding_idx=0), 1 looks like a start-of-sequence marker
    and 2 an end-of-sequence marker (sequences are wrapped as [1]+x+[2]
    and decoding stops on output 2) — confirm against the tokenizer.
    """

    def __init__(self, embDim, hidDim, outDim):
        # outDim doubles as the vocabulary size for both the embedding
        # table and the decoder's output distribution.

        self.outDim = outDim

        self.embedding = T.nn.Embedding(
            num_embeddings=outDim,
            embedding_dim=embDim,
            padding_idx=0
        )
        self.reqModel = ReqModel(embDim, hidDim, outDim)
        self.resModel = ResModel(embDim, hidDim, outDim)
        self.lossFunc = T.nn.NLLLoss()
        
        self.lr = 0.01
        self.genOptim()

    def genOptim(self):
        """(Re)build the Adam optimizer over all trainable parameters at
        the current self.lr; called after every learning-rate change."""
        self.optim = T.optim.Adam(
            chain(self.embedding.parameters(), self.reqModel.parameters(), self.resModel.parameters()),
            lr=self.lr
        )

    def mulLR(self, rate):
        """Scale the learning rate by *rate* and rebuild the optimizer.

        NOTE(review): rebuilding discards Adam's moment estimates.
        """
        self.lr *= rate
        self.genOptim()

    def load(self, path):
        """Restore model weights and learning rate from a checkpoint
        written by save(); returns the stored epoch number."""
        state = T.load(path)
        self.embedding.load_state_dict(state['net']['emb'])
        self.reqModel.load_state_dict(state['net']['req'])
        self.resModel.load_state_dict(state['net']['res'])
        #self.optim.load_state_dict(state['optimizer'])
        self.lr = state['lr']
        # Optimizer state is intentionally rebuilt fresh (see commented line).
        self.genOptim()
        return state["epoch"]

    def save(self, path, epoch):
        """Write a checkpoint; *path* must contain a %-format slot for the
        epoch number (e.g. 'model_%d.pt')."""
        state = {
            'net': {
                'emb': self.embedding.state_dict(),
                'req': self.reqModel.state_dict(),
                'res': self.resModel.state_dict()
            },
            'optimizer': self.optim.state_dict(),
            'epoch': epoch,
            'lr': self.lr
        }
        T.save(state, path % epoch)

    def trainStep(self, req, res):
        """One optimization step over a batch of (req, res) token-id lists.

        Each pair is processed individually (batch size 1 per forward);
        the losses are averaged, then a single backward/step is taken.
        Returns the averaged loss tensor.
        """
        # Wrap with start (1) / end (2) markers; the label is the decoder
        # input shifted left by one, padded with end markers, so the model
        # learns next-token prediction.
        req = [[1] + x + [2] for x in req]
        label =  [y + [2, 2] for y in res]
        res = [[1] + y + [2] for y in res]

        self.embedding.train()
        self.reqModel.train()
        self.resModel.train()
        self.optim.zero_grad()

        # forward
        loss = T.tensor(0.0)
        for step in range(len(req)):
            Req = T.tensor([req[step]])
            Res = T.tensor([res[step]])
            ResLabel = T.tensor([label[step]])
            reqEmb = self.embedding(Req)
            resEmb = self.embedding(Res)
            reqOut, hc = self.reqModel(reqEmb)
            out, _, __, ___ = self.resModel(resEmb, reqOut, hc)
            # Flatten (1, seq, outDim) -> (seq, outDim) for NLLLoss.
            loss += self.lossFunc(
                out.reshape((-1, self.outDim)),
                ResLabel.reshape((-1, ))
            )
        loss = loss / len(req)

        #backward
        loss.backward()
        self.optim.step()

        return loss

    def eval(self, req, debug = False):
        """Greedy-decode a response for a single request token-id list.

        Decoding feeds one token at a time, carrying the decoder (h, c)
        state forward; it stops on the end marker (2) or after 20 tokens.
        Returns the list of generated token ids (without markers).
        """
        req = [[1] + req + [2]]
        req = T.tensor(req)
        res = T.tensor([[1]])

        self.embedding.eval()
        self.reqModel.eval()
        self.resModel.eval()

        # forward
        reqEmb = self.embedding(req)
        reqOut, hc = self.reqModel(reqEmb)
        s = []
        while True:
            resEmb = self.embedding(res)
            out, hc, resOut, att = self.resModel(resEmb, reqOut, hc)
            out = out[0, -1, :].argmax(dim=0).detach().numpy() # should be a single scalar
            out = int(out)

            if debug:
                # Diagnostics: distances between consecutive encoder states
                # and between each encoder state and the current decoder state.
                for i in range(req.shape[1]):
                    d = reqOut[0, i, :] - resOut[0, 0, :]
                    if i:
                        print("Delta: %f" % length(reqOut[0, i, :] - reqOut[0, i-1, :]))
                    print("Length: %f" % length(reqOut[0, i, :]))
                    print("Dist: %f" % length(d))
                    print('--------------------------')
                print(att)
                print(out)

            if out == 2:
                break
            s.append(out)
            if len(s) >= 20:
                break
            res = T.tensor([[out]])

        return s
