import numpy as np

import torch as T
from torch.autograd import Variable

class ReqModel(T.nn.Module):
    """Request encoder: a single-layer, batch-first LSTM over embedded tokens.

    When asked, the initial hidden/cell states are drawn uniformly from
    [-1, 1) to inject randomness; otherwise the LSTM's default zero states
    are used.
    """

    def __init__(self, embDim, hidDim, outDim):
        # `outDim` is accepted for signature parity with ResModel but is
        # not used by this encoder.
        super(ReqModel, self).__init__()

        self.hidDim = hidDim

        self.lstmLayer = T.nn.LSTM(
            input_size=embDim,
            hidden_size=hidDim,
            batch_first=True,
            num_layers=1
        )

    def forward(self, reqEmb, randIn):
        """Encode a request.

        reqEmb: (batch, seq, embDim) embedded request tokens.
        randIn: if True, start from random states in [-1, 1); the randomness
                stands in for context for now (per the original author's note).
        Returns (output, (h_n, c_n)) exactly as produced by nn.LSTM.
        """
        if randIn:
            # Fix: only draw from the RNG when the random initial states are
            # actually used. The original allocated them unconditionally,
            # wasting work and advancing the RNG even on the False path.
            h = T.rand(1, reqEmb.shape[0], self.hidDim) * 2 - 1
            c = T.rand(1, reqEmb.shape[0], self.hidDim) * 2 - 1
            reqOut, hc = self.lstmLayer(reqEmb, (h, c))
        else:
            reqOut, hc = self.lstmLayer(reqEmb)
        return reqOut, hc

def length(x):
    """Return the Euclidean (L2) norm of tensor `x` as a scalar tensor."""
    squared_sum = T.sum(x * x)
    return T.sqrt(squared_sum)

class ResModel(T.nn.Module):
    """Response decoder with dot-product attention.

    An LSTM runs over the embedded response; each decoder step attends over
    the encoder outputs, the attended context is fused with the decoder
    state through two dense layers, and per-step vocabulary
    log-probabilities are emitted.
    """

    def __init__(self, embDim, hidDim, outDim):
        super(ResModel, self).__init__()

        self.lstmLayer = T.nn.LSTM(
            input_size=embDim,
            hidden_size=hidDim,
            batch_first=True,
            num_layers=1
        )
        self.ratio = T.nn.Softmax(dim=2)
        self.dense1 = T.nn.Linear(hidDim*2, 256)
        self.dense2 = T.nn.Linear(256, outDim)

    def forward(self, resEmb, reqOut, lasthc, tau = 1):
        """Decode one (sub)sequence.

        resEmb: (batch, resLen, embDim) embedded response tokens.
        reqOut: (batch, reqLen, hidDim) encoder outputs to attend over.
        lasthc: (h, c) state to resume the decoder LSTM from.
        tau:    scale applied before the final projection (temperature-like).
        Returns (logProbs, hc, decoded, attention).
        """
        decoded, hc = self.lstmLayer(resEmb, lasthc)
        # Dot-product attention: decoder steps against encoder steps.
        scores = T.matmul(decoded, reqOut.transpose(1, 2))
        attention = self.ratio(scores)
        context = T.matmul(attention, reqOut)
        # Fuse attended context with the raw decoder output.
        fused = T.cat([context, decoded], dim=2)
        hidden = T.tanh(self.dense1(fused))
        logits = self.dense2(hidden * tau)
        logProbs = T.nn.functional.log_softmax(logits, dim=2)
        return logProbs, hc, decoded, attention

class ScoreModel(T.nn.Module):
    """CNN discriminator scoring a (request, response) pair.

    The same conv stack encodes both sequences into fixed-size feature
    vectors; the concatenated pair is classified with a 2-way log-softmax.
    """

    def __init__(self, embDim, filters):
        super(ScoreModel, self).__init__()

        self.filters = filters

        # Three parallel 1-D convolutions with kernel sizes 3/5/7; the
        # padding keeps the sequence length unchanged in each case.
        self.conv1 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=3,
            stride=1,
            padding=1
        )
        self.conv2 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=5,
            stride=1,
            padding=2
        )
        self.conv3 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=7,
            stride=1,
            padding=3
        )

        self.dense1 = T.nn.Linear(filters * 3, 128)
        # 256 = 2 * 128: request features concatenated with response features.
        self.dense2 = T.nn.Linear(256, 2)

    def _encode(self, emb):
        """Conv + global-max-pool + dense feature vector for one sequence.

        emb: (batch, seq, embDim) -> (batch, 128).
        """
        x = emb.transpose(1, 2)  # Conv1d expects (batch, channels, seq)
        pooled = [
            T.max(T.tanh(conv(x)), dim=2)[0]
            for conv in (self.conv1, self.conv2, self.conv3)
        ]
        return T.tanh(self.dense1(T.cat(pooled, dim=1)))

    def forward(self, reqEmb, genEmb):
        """Score the pair.

        Returns (logProbs, features): logProbs is (batch, 2) log-softmax
        scores, features is the (batch, 256) concatenated feature vector
        (used by the generator's feature-matching loss).
        """
        # The original duplicated the encode pipeline verbatim for the two
        # inputs; it is factored into _encode with identical behavior.
        featReq = self._encode(reqEmb)
        featGen = self._encode(genEmb)
        feats = T.cat([featReq, featGen], dim=1)
        logProbs = T.nn.LogSoftmax(dim=1)(self.dense2(feats))
        return logProbs, feats

from itertools import chain
import random

class LSTM():
    """Trainer/driver tying together the embedding, the ReqModel encoder,
    the ResModel attention decoder and the ScoreModel CNN discriminator.

    Token id conventions visible in this class: 0 = padding, 1 = start of
    sequence, 2 = end of sequence; noise sampling starts at id 5, so ids
    below 5 are presumably reserved — TODO confirm against the vocabulary.
    """

    def __init__(self, embDim, hidDim, outDim):

        self.outDim = outDim
        self.width = 30  # generation length cap used when `limited` is set

        self.embedding = T.nn.Embedding(
            num_embeddings=outDim,
            embedding_dim=embDim,
            padding_idx=0
        )
        self.reqModel = ReqModel(embDim, hidDim, outDim)
        self.resModel = ResModel(embDim, hidDim, outDim)
        self.scrModel = ScoreModel(embDim, 100)

        self.updateEmbedding()

        self.lr1 = 0.0005  # generator learning rate
        self.lr2 = 0.0005  # discriminator learning rate
        self.tau = 1000    # temperature scale used when generating in train mode
        self.genOptim()

    def genOptim(self):
        """(Re)create both Adam optimizers from the current lr1/lr2."""
        self.optimGen = T.optim.Adam(
            chain(self.embedding.parameters(), self.reqModel.parameters(), self.resModel.parameters()),
            lr=self.lr1
        )
        self.optimScr = T.optim.Adam(
            self.scrModel.parameters(),
            lr=self.lr2
        )

    def mulLR(self, rate):
        """Scale both learning rates by `rate` and rebuild the optimizers."""
        self.lr1 *= rate
        self.lr2 *= rate
        self.genOptim()

    def load_pretrain(self, path):
        """Load embedding/encoder/decoder weights only (no discriminator,
        no learning rates). Returns 0 as the starting epoch."""
        state = T.load(path)
        self.embedding.load_state_dict(state['net']['emb'])
        self.updateEmbedding()

        self.reqModel.load_state_dict(state['net']['req'])
        self.resModel.load_state_dict(state['net']['res'])
        self.genOptim()
        return 0

    def load(self, path):
        """Restore a full checkpoint written by `save`; returns its epoch."""
        state = T.load(path)
        self.embedding.load_state_dict(state['net']['emb'])
        self.updateEmbedding()

        self.reqModel.load_state_dict(state['net']['req'])
        self.resModel.load_state_dict(state['net']['res'])
        self.scrModel.load_state_dict(state['net']['scr'])
        self.lr1 = state['lr1']
        self.lr2 = state['lr2']
        self.genOptim()
        return state["epoch"]

    def save(self, path, epoch):
        """Write a checkpoint. `path` must contain a %-placeholder that is
        filled with `epoch` (e.g. "model-%d.pt")."""
        state = {
            'net': {
                'emb': self.embedding.state_dict(),
                'req': self.reqModel.state_dict(),
                'res': self.resModel.state_dict(),
                'scr': self.scrModel.state_dict()
            },
            'epoch': epoch,
            'lr1': self.lr1,
            'lr2': self.lr2
        }
        T.save(state, path % epoch)

    def stats(self, batch):
        """Mean and (biased, divide-by-n) covariance of an (n, d) batch."""
        n = batch.shape[0]
        mu = T.sum(batch, dim=0) / n
        d = batch - mu.expand(batch.shape)
        cov = (d.t() @ d) / n
        return mu, cov

    def updateEmbedding(self):
        """Cache the full (outDim, embDim) embedding matrix; `eval` uses it
        to build differentiable expected embeddings."""
        I = T.arange(self.outDim)
        self.embeddingArray = self.embedding(I)

    def trainGen(self, req, res, limited, GAN = True):
        """One generator step over a batch of (req, res) token lists.

        Loss = 0.1 * teacher-forced NLL
             + (if GAN) a feature-matching term comparing mean/covariance of
               discriminator features on real vs generated responses.
        Returns the combined loss tensor.
        """
        req = [[1] + x + [2] for x in req]
        # Targets are the decoder inputs shifted left by one, padded with
        # <eos>: input [1, y..., 2] predicts label [y..., 2, 2].
        label = [y + [2, 2] for y in res]
        res = [[1] + y + [2] for y in res]

        self.embedding.train()
        self.reqModel.train()
        self.resModel.train()
        self.scrModel.train()
        self.optimGen.zero_grad()
        self.optimScr.zero_grad()

        # embedding array update
        self.updateEmbedding()

        # forward
        if GAN:
            fr = []  # discriminator features on real responses
            fs = []  # discriminator features on generated responses
        loss1 = T.tensor(0.0)
        for step in range(len(req)):
            Req = T.tensor([req[step]])
            reqEmb = self.embedding(Req)
            Res = T.tensor([res[step]])
            resEmb = self.embedding(Res)
            ResLabel = T.tensor([label[step]])

            if GAN:
                _, genEmb, out = self.eval(req[step], False, self.width if limited else None, True)
                scrR, fR = self.scrModel(reqEmb, resEmb)
                scrS, fS = self.scrModel(reqEmb, genEmb)
                fr.append(fR)
                fs.append(fS)

            reqOut, hc = self.reqModel(reqEmb, False)
            out, _, __, ___ = self.resModel(resEmb, reqOut, hc)
            # ResModel already returns log-probabilities, so NLLLoss applies
            # directly (no extra .log()).
            loss1 += T.nn.NLLLoss()(
                out.reshape((-1, self.outDim)),
                ResLabel.reshape((-1, ))
            )

        loss1 /= len(req)
        if GAN:
            fr = T.cat(fr, dim = 0)
            fs = T.cat(fs, dim = 0)
            muR, covR = self.stats(fr)
            muS, covS = self.stats(fs)
            # NOTE(review): .inverse() assumes the feature covariances are
            # non-singular — with few samples this can fail; confirm batch
            # sizes exceed the feature dimension or add regularization.
            covSi = covS.inverse()
            covRi = covR.inverse()
            loss2 = ((covSi @ covR) + (covRi @ covS)).trace() + \
                        (muS - muR).t() @ (covSi + covRi) @ (muS - muR)
            loss2 /= muR.shape[0]
        else:
            loss2 = T.tensor(0.0)

        # backward
        loss = loss1 * 0.1  +  loss2
        print(loss1.detach().numpy(), loss2.detach().numpy(), loss.detach().numpy())
        loss.backward()
        self.optimGen.step()

        self.embedding.eval()
        self.reqModel.eval()
        self.resModel.eval()
        self.scrModel.eval()

        return loss

    def trainScr(self, req, res, limited):
        """One discriminator step.

        Pushes the "real" score toward 0 for generated, position-swapped and
        pure-noise responses (loss1), and toward 1 for the real response and
        a lightly noised copy of it (loss2). Returns loss1 + loss2.
        """
        req = [[1] + x + [2] for x in req]
        res = [[1] + y + [2] for y in res]

        self.embedding.eval()
        self.reqModel.eval()
        self.resModel.eval()
        self.scrModel.train()

        # embedding array update
        self.updateEmbedding()

        # eval
        # 4. Construct different mini-batches for real and fake, i.e. each mini-batch needs to contain only all real images or all generated images.
        loss1 = T.tensor(0.0)
        loss2 = T.tensor(0.0)
        for step in range(len(req)):
            Req = T.tensor([req[step]])
            reqEmb = self.embedding(Req)
            Res = T.tensor([res[step]])
            resEmb = self.embedding(Res)
            # Random-token "response" of the same length as the real one.
            # Fixes two bugs: randint's upper bound is inclusive, so
            # self.outDim was out of range for the embedding table; and
            # len(Res) was the batch dimension (always 1), so the noise
            # sequence was always empty.
            Noise = T.tensor([[1] + [random.randint(5, self.outDim - 1) for i in range(len(res[step]) - 2)] + [2]])
            noiseEmb = self.embedding(Noise)

            # swap two random positions of the real response
            l = [i for i in range(0, resEmb.shape[1])]
            rand1, rand2 = random.sample(l, 2)
            l[rand1], l[rand2] = l[rand2], l[rand1]
            swapEmb = resEmb[:, l, :].clone().detach().requires_grad_(True)

            _, genEmb, _ = self.eval(req[step], True, self.width if limited else None, True)
            scr, _ = self.scrModel(reqEmb, genEmb)
            scr = scr.exp()
            loss1 += ((scr[:, 1] - 0) * (scr[:, 1] - 0)).sum(dim=0)

            scr, _ = self.scrModel(reqEmb, swapEmb)
            scr = scr.exp()
            loss1 += ((scr[:, 1] - 0) * (scr[:, 1] - 0)).sum(dim=0)

            # 13: Add noise to inputs, decay over time
            scr, _ = self.scrModel(reqEmb, noiseEmb)
            scr = scr.exp()
            loss1 += ((scr[:, 1] - 0) * (scr[:, 1] - 0)).sum(dim=0)

            scr, _ = self.scrModel(reqEmb, resEmb)
            scr = scr.exp()
            loss2 += ((scr[:, 1] - 1) * (scr[:, 1] - 1)).sum(dim=0)

            scr, _ = self.scrModel(reqEmb, resEmb + (T.rand(resEmb.shape) - 0.5) * 0.02)
            scr = scr.exp()
            loss2 += ((scr[:, 1] - 1) * (scr[:, 1] - 1)).sum(dim=0)

        loss1 = loss1 / len(req) / 3
        loss2 = loss2 / len(req) / 2

        # backward: the fake-side and real-side losses are stepped
        # separately; loss1 retains the graph because the two losses share
        # forward passes through the discriminator.
        self.optimGen.zero_grad()
        self.optimScr.zero_grad()
        loss1.backward(retain_graph=True)
        self.optimScr.step()

        self.optimGen.zero_grad()
        self.optimScr.zero_grad()
        loss2.backward()
        self.optimScr.step()

        self.embedding.eval()
        self.reqModel.eval()
        self.resModel.eval()
        self.scrModel.eval()

        loss = loss1+loss2
        return loss

    def score(self, req, outEmb):
        """Discriminator log-probabilities for (request token list,
        pre-embedded response)."""
        req = [[1] + req + [2]]
        req = T.tensor(req)
        reqEmb = self.embedding(req)
        return self.scrModel(reqEmb, outEmb)[0]

    def eval(self, req, rand = True, limit = None, train = False):
        """Greedily decode a response for token list `req`.

        rand:  randomize the encoder's initial state.
        limit: optional cap on the number of generated tokens.
        train: if True, feed back the *expected* embedding under the output
               distribution (keeps generation differentiable) instead of the
               hard argmax token, and also return embeddings/distributions.

        Returns the token list, or (tokens, genEmb, outputs) when train.
        """
        req = [[1] + req + [2]]
        req = T.tensor(req)
        res = T.tensor([[1]])

        # forward
        reqEmb = self.embedding(req)
        reqOut, hc = self.reqModel(reqEmb, rand)

        s = []
        t = [self.embeddingArray[1].reshape(1, 1, -1)]  # start with <sos> embedding
        o = []
        cnt = 0
        while limit is None or cnt < limit:
            if train:
                resEmb = t[-1]
            else:
                resEmb = self.embedding(res)

            out, hc, resOut, att = self.resModel(resEmb, reqOut, hc, self.tau if train else 1)
            out = out.exp()

            ch = int(out[0, -1, :].argmax(dim=0).detach().numpy())  # greedy pick: a single token id
            if train:
                # Softmax-weighted mixture over the whole vocabulary.
                t.append(T.matmul(out[:, -1:, :], self.embeddingArray))
                o.append(out)
            if ch == 2:
                break

            s.append(ch)
            res = T.tensor([[ch]])
            cnt += 1

        t.append(self.embeddingArray[2].reshape(1, 1, -1))  # close with <eos> embedding
        if train:
            emb = T.cat(t, dim=1)
            # Guard: if <eos> was never produced within `limit`... o can
            # only be empty when the loop body never ran (limit <= 0), in
            # which case T.cat on an empty list would raise.
            out = T.cat(o, dim=1) if o else T.zeros(1, 0, self.outDim)
            return s, emb, out
        else:
            return s
