import numpy as np

import torch as T
from torch.autograd import Variable

class ReqModel(T.nn.Module):

    def __init__(self, embDim, hidDim, outDim):
        super(ReqModel, self).__init__()
        
        self.hidDim = hidDim

        self.lstmLayer = T.nn.LSTM(
            input_size=embDim,
            hidden_size=hidDim,
            batch_first=True,
            num_layers=1,
            bidirectional=True
        )

    def forward(self, reqEmb, randIn):
        # rand用于引入随机性；以后可以改为上下文
        h = T.rand(2, reqEmb.shape[0], self.hidDim) * 2 - 1
        c = T.rand(2, reqEmb.shape[0], self.hidDim) * 2 - 1
        if randIn:
            reqOut, hc = self.lstmLayer(reqEmb, (h, c))
        else:
            reqOut, hc = self.lstmLayer(reqEmb)
            
        h, c = hc
        hc1 = [h[:1,  :, :], c[:1, :, :]]
        hc2 = [h[1:, :, :], c[1:, :, :]]
        reqOut1 = reqOut[:, :, :self.hidDim]
        reqOut2 = reqOut[:, :, self.hidDim:]
        
        return reqOut1, reqOut2, hc1, hc2

class ResModel(T.nn.Module):

    def __init__(self, embDim, hidDim, outDim):
        super(ResModel, self).__init__()
        self.hidDim = hidDim

        self.lstmLayer1 = T.nn.LSTM(
            input_size=embDim,
            hidden_size=hidDim,
            batch_first=True
        )
        self.lstmLayer2 = T.nn.LSTM(
            input_size=embDim,
            hidden_size=hidDim,
            batch_first=True
        )
        self.ratio = T.nn.Softmax(dim=2)
        self.dense11 = T.nn.Linear(hidDim*2, 128)
        self.dense12 = T.nn.Linear(hidDim*2, 128)
        self.dense2 = T.nn.Linear(256, outDim)

    def forward(self, resEmb, reqOut1, reqOut2, lhc1, lhc2, tau = 1):
        resOut1, hc1 = self.lstmLayer1(resEmb, lhc1)
        resOut1 = T.nn.Tanh()(resOut1)
        attention1 = T.nn.Softmax(dim=2)(resOut1 @ reqOut1.transpose(1, 2))
        context1 = attention1 @ reqOut1
        combine1 = T.cat([context1, resOut1], dim=2)
        d11out = T.nn.Tanh()(self.dense11(combine1))

        resOut2, hc2 = self.lstmLayer2(resEmb, lhc2)
        resOut2 = T.nn.Tanh()(resOut2)
        attention2 = T.nn.Softmax(dim=2)(resOut2 @ reqOut2.transpose(1, 2))
        context2 = attention2 @ reqOut2
        combine2 = T.cat([context2, resOut2], dim=2)
        d12out = T.nn.Tanh()(self.dense12(combine2))

        d1out = T.cat([d11out, d12out], dim=2)
        d2out = T.nn.LogSoftmax(dim=2)(self.dense2(d1out * tau))
        #d2out = T.nn.functional.gumbel_softmax(self.dense2(d1out), tau=tau, dim=2)
        return d2out, hc1, hc2

class ScoreCorrelationModel(T.nn.Module):
    
    def __init__(self, embDim, filters):
        super(ScoreCorrelationModel, self).__init__()

        self.filters = filters

        self.conv1 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=5,
            stride=1,
            padding=2
        )
        self.conv2 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=9,
            stride=1,
            padding=4
        )
        self.conv3 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=13,
            stride=1,
            padding=6
        )

        self.conv4 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=5,
            stride=1,
            padding=2
        )
        self.conv5 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=9,
            stride=1,
            padding=4
        )
        self.conv6 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=13,
            stride=1,
            padding=6
        )

        self.dense = T.nn.Linear(filters * 3, 2)

    def forward(self, reqEmb, genEmb):
        reqT = reqEmb.transpose(1, 2)
        conv1 = T.nn.Tanh()(self.conv1(reqT))
        conv2 = T.nn.Tanh()(self.conv2(reqT))
        conv3 = T.nn.Tanh()(self.conv3(reqT))
        pool1 = T.max(conv1, dim=2)[0]
        pool2 = T.max(conv2, dim=2)[0]
        pool3 = T.max(conv3, dim=2)[0]

        genT = genEmb.transpose(1, 2)
        conv4 = T.nn.Tanh()(self.conv4(genT))
        conv5 = T.nn.Tanh()(self.conv5(genT))
        conv6 = T.nn.Tanh()(self.conv6(genT))
        pool4 = T.max(conv4, dim=2)[0]
        pool5 = T.max(conv5, dim=2)[0]
        pool6 = T.max(conv6, dim=2)[0]

        pool = T.nn.Tanh()(T.cat([pool1, pool2, pool3], dim=1) - T.cat([pool4, pool5, pool6], dim=1))
        dense = T.nn.LogSoftmax(dim=1)(self.dense(pool))
        return dense, pool

class ScoreFluencyModel(T.nn.Module):
    
    def __init__(self, embDim, filters):
        super(ScoreFluencyModel, self).__init__()

        self.filters = filters

        self.conv1 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=3,
            stride=1,
            padding=1
        )
        self.conv2 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=4,
            stride=1,
            padding=2
        )
        self.conv3 = T.nn.Conv1d(
            in_channels=embDim,
            out_channels=filters,
            kernel_size=5,
            stride=1,
            padding=2
        )

        self.dense = T.nn.Linear(filters * 3, 2)

    def forward(self,  genEmb):
        genT = genEmb.transpose(1, 2)
        conv1 = T.nn.Tanh()(self.conv1(genT))
        conv2 = T.nn.Tanh()(self.conv2(genT))
        conv3 = T.nn.Tanh()(self.conv3(genT))
        pool1 = T.max(conv1, dim=2)[0]
        pool2 = T.max(conv2, dim=2)[0]
        pool3 = T.max(conv3, dim=2)[0]

        pool = T.cat([pool1, pool2, pool3], dim=1)
        dense = T.nn.LogSoftmax(dim=1)(self.dense(pool))
        return dense, pool

from itertools import chain
import random

class LSTMGAN():
    """GAN-style trainer coupling an embedding table, the ReqModel/ResModel
    seq2seq generator and two convolutional critics (correlation + fluency).

    Token-id conventions used throughout this class:
        0 = padding, 1 = <start>, 2 = <end>; real word ids start higher.
    Not an nn.Module itself; it owns the sub-modules and their optimizers.
    """

    def __init__(self, embDim, hidDim, conDim, outDim):
        # embDim: token embedding size; hidDim: generator LSTM hidden size;
        # conDim: filters per convolution in the critics; outDim: vocab size.

        self.outDim = outDim
        self.width = 30  # max decode length when "limited" generation is on

        self.embedding = T.nn.Embedding(
            num_embeddings=outDim,
            embedding_dim=embDim,
            padding_idx=0
        )
        self.reqModel = ReqModel(embDim, hidDim, outDim)
        self.resModel = ResModel(embDim, hidDim, outDim)
        self.scrCorModel = ScoreCorrelationModel(embDim, conDim)
        self.scrFluModel = ScoreFluencyModel(embDim, conDim)

        # self.embeddingArray = T.zeros([outDim, embDim])
        self.updateEmbedding()

        self.lr1 = 0.0005  # generator learning rate
        self.lr2 = 0.0005  # critic learning rate
        self.tau = 100  # temperature for training-time soft decoding (eval())
        self.genOptim()

    def genOptim(self):
        """(Re)create both Adam optimizers from the current lr1/lr2.

        Called after any learning-rate change or checkpoint reload so the
        optimizers always wrap the live parameters.
        """
        self.optimGen = T.optim.Adam(
            chain(self.embedding.parameters(), self.reqModel.parameters(), self.resModel.parameters()),
            lr=self.lr1
        )
        self.optimScr = T.optim.Adam(
            chain(self.scrCorModel.parameters(), self.scrFluModel.parameters()),
            lr=self.lr2
        )

    def mulLR(self, rate):
        """Scale both learning rates by *rate* and rebuild the optimizers."""
        self.lr1 *= rate
        self.lr2 *= rate
        self.genOptim()

    def load_pretrain(self, path):
        """Load only the pre-trained generator parts (embedding + seq2seq).

        Returns -1 as the starting-epoch sentinel.
        """
        state = T.load(path)
        self.embedding.load_state_dict(state['net']['emb'])
        self.updateEmbedding()

        self.reqModel.load_state_dict(state['net']['req'])
        self.resModel.load_state_dict(state['net']['res'])
        self.genOptim()
        return -1

    def load(self, path):
        """Restore a full checkpoint written by save(); returns its epoch."""
        state = T.load(path)
        self.embedding.load_state_dict(state['net']['emb'])
        self.updateEmbedding()

        self.reqModel.load_state_dict(state['net']['req'])
        self.resModel.load_state_dict(state['net']['res'])
        self.scrCorModel.load_state_dict(state['net']['scrCor'])
        self.scrFluModel.load_state_dict(state['net']['scrFlu'])
        self.lr1 = state['lr1']
        self.lr2 = state['lr2']
        self.genOptim()
        return state["epoch"]

    def save(self, path, epoch):
        """Write all sub-module state dicts plus learning rates to
        `path % epoch` (path must contain a %-placeholder)."""
        state = {
            'net': {
                'emb': self.embedding.state_dict(),
                'req': self.reqModel.state_dict(),
                'res': self.resModel.state_dict(),
                'scrCor': self.scrCorModel.state_dict(),
                'scrFlu': self.scrFluModel.state_dict()
            },
            'epoch': epoch,
            'lr1': self.lr1,
            'lr2': self.lr2
        }
        T.save(state, path % epoch)
    
    def stats(self, batch):
        """Mean and covariance of an (n, d) feature batch.

        Only referenced by the commented-out Fréchet-style critic loss in
        trainGen(); kept for potential reuse.
        """
        n = batch.shape[0]
        mu =  T.sum(batch, dim=0) / n
        d = batch - mu.expand(batch.shape)
        cov = (d.t() @ d) / n
        return mu, cov

    def updateEmbedding(self):
        """Refresh the dense (outDim, embDim) embedding matrix used to build
        soft embeddings from output distributions in eval(train=True)."""
        I = T.tensor([i for i in range(self.outDim)])
        self.embeddingArray = self.embedding(I)

    def trainGen(self, req, res, limited, GAN = True):
        """One generator update over a batch of (req, res) token lists.

        GAN=True : adversarial step — push the frozen critics' scores on
                   generated responses toward "real" (LSGAN-style squared
                   error on the exp'd log-probabilities).
        GAN=False: supervised pre-training with teacher forcing and NLL.
        limited  : cap generated length at self.width while sampling.
        Returns the scalar batch loss tensor.
        """
        req = [[1] + x + [2] for x in req]
        # Target at position t is the token the decoder should emit after
        # reading res[t]; trailing [2, 2] keeps the targets the same length
        # as the <start>-prefixed decoder input built on the next line.
        label =  [y + [2, 2] for y in res]
        res = [[1] + y + [2] for y in res]

        self.embedding.train()
        self.reqModel.train()
        self.resModel.train()
        self.scrCorModel.eval()
        self.scrFluModel.eval()
        self.optimGen.zero_grad()
        self.optimScr.zero_grad()

        # embedding array update
        self.updateEmbedding()

        # forward
        if GAN:
            loss2 = T.tensor(0.0)
            loss3 = T.tensor(0.0)
            frCor = []
            fsCor = []
            frFlu = []
            fsFlu = []
        else:
            loss1 = T.tensor(0.0)
        for step in range(len(req)):
            # print("gen %d" % step)
            Req = T.tensor([req[step]])
            reqEmb = self.embedding(Req)
            Res = T.tensor([res[step]])
            resEmb = self.embedding(Res)
            ResLabel = T.tensor([label[step]])

            if GAN:
                # NOTE(review): req[step] was already wrapped with 1/2 above
                # and eval() wraps its argument again, so the request carries
                # doubled <start>/<end> markers here — confirm intended.
                _, genEmb = self.eval(req[step], False, self.width if limited else None, True)
                #scrR, fR = self.scrCorModel(reqEmb, resEmb)
                scrS, fS = self.scrCorModel(reqEmb, genEmb)
                scrS = scrS.exp()
                #frCor.append(fR)
                #fsCor.append(fS)
                # Least-squares objective: "real" prob -> 1, "fake" prob -> 0.
                loss2 += ((scrS[:, 1] - 1) * (scrS[:, 1] - 1)).sum(dim=0)
                loss2 += ((scrS[:, 0]) * (scrS[:, 0])).sum(dim=0)
                
                #scrR, fR = self.scrFluModel(resEmb)
                scrS, fS = self.scrFluModel(genEmb)
                scrS = scrS.exp()
                #frFlu.append(fR)
                #fsFlu.append(fS)
                loss3 += ((scrS[:, 1] - 1) * (scrS[:, 1] - 1)).sum(dim=0)
                loss3 += ((scrS[:, 0]) * (scrS[:, 0])).sum(dim=0)

            else:
                # Teacher forcing: feed the gold response and score each
                # position against the shifted target with NLL.
                reqOut1, reqOut2, hc1, hc2 = self.reqModel(reqEmb, False)
                out = self.resModel(resEmb, reqOut1, reqOut2, hc1, hc2, 1)[0]
                loss1 += T.nn.NLLLoss()(
                    out.reshape((-1, self.outDim)),
                    ResLabel.reshape((-1, ))
                )
        
        if GAN:
            '''
            fr = T.cat(frCor, dim = 0)
            fs = T.cat(fsCor, dim = 0)
            muR, covR = self.stats(fr)
            muS, covS = self.stats(fs)
            covSi = covS.inverse()
            covRi = covR.inverse()
            loss2 = ((covSi @ covR) + (covRi @ covS)).trace() + \
                        (muS - muR).t() @ (covSi + covRi) @ (muS - muR)

            print("batch: %d" % len(frCor))
            print("width: %d" % covR.shape[0])

            fr = T.cat(frFlu, dim = 0)
            fs = T.cat(fsFlu, dim = 0)
            muR, covR = self.stats(fr)
            muS, covS = self.stats(fs)
            covSi = covS.inverse()
            covRi = covR.inverse()
            loss3 = ((covSi @ covR) + (covRi @ covS)).trace() + \
                        (muS - muR).t() @ (covSi + covRi) @ (muS - muR)
            '''
            loss2 /= len(req)
            loss3 /= len(req)
        else:
            loss1 /= len(req)

        # backward
        if GAN:
            # Correlation loss is down-weighted (0.4) against the fluency loss.
            loss = loss2 * 0.4 + loss3
            print(loss2.detach().numpy(), loss3.detach().numpy(), loss.detach().numpy())
        else:
            loss = loss1
        
        loss.backward()
        self.optimGen.step()

        self.embedding.eval()
        self.reqModel.eval()
        self.resModel.eval()
        self.scrCorModel.eval()
        self.scrFluModel.eval()

        return loss

    def trainScr(self, req, res, limited):
        """One critic update: train scrCorModel, then scrFluModel, with the
        generator frozen (eval mode).

        Uses noisy soft labels (target +- N(0, SD)) and small Gaussian
        perturbations of the embeddings as regularization. Real and fake
        examples are accumulated in separate losses and stepped separately
        (see the mini-batch note below). Returns the summed detached loss.
        """
        req = [[1] + x + [2] for x in req]
        res = [[1] + y + [2] for y in res]

        self.embedding.eval()
        self.reqModel.eval()
        self.resModel.eval()
        self.scrCorModel.train()
        self.scrFluModel.train()

        # embedding array update
        self.updateEmbedding()

        # eval
        # 4. Construct different mini-batches for real and fake, i.e. each mini-batch needs to contain only all real images or all generated images.
        loss = T.tensor(0.0)


        # Train Correlation Score
        print('Cor...')
        loss1 = T.tensor(0.0)
        loss2 = T.tensor(0.0)
        for step in range(len(req)):
            #print("scr %d" % step)
            
            Req = T.tensor([req[step]])
            reqEmb = self.embedding(Req)
            Res = T.tensor([res[step]])
            resEmb = self.embedding(Res)

            # swap
            # NOTE(review): swapResEmb is never used in this (correlation)
            # loop — only the fluency loop below uses a swapped response.
            l = [i for i in range(0, resEmb.shape[1])]
            rand1, rand2 = random.sample(l, 2)
            l[rand1], l[rand2] = l[rand2], l[rand1]
            swapResEmb = resEmb[:, l, :]  #.clone().detach().requires_grad_(True)

            SD = 0.1  # std of the label noise
            noise = 0.01  # std of the embedding perturbation
            # NOTE(review): req[step] already carries 1/2 wrappers and eval()
            # wraps again — doubled start/end markers; confirm intended.
            _, genEmb = self.eval(req[step], True, self.width if limited else None, True)

            # Fake pair: drive the "real" probability toward ~0.
            scr, _ = self.scrCorModel(reqEmb, genEmb + (T.randn(genEmb.shape) * noise))
            scr = scr.exp()
            label = 0.0 + T.randn(1) * SD
            loss1 += ((scr[:, 1] - label) * (scr[:, 1] - label)).sum(dim=0)
            loss1 += ((scr[:, 0] - 1 + label) * (scr[:, 0] - 1 +  label)).sum(dim=0)

            # Real pair: drive the "real" probability toward ~1.
            scr, _ = self.scrCorModel(reqEmb, resEmb + (T.randn(resEmb.shape) * noise))
            scr = scr.exp()
            label = 1.0 +  T.randn(1) * SD
            loss2 += ((scr[:, 1] - label) * (scr[:, 1] - label)).sum(dim=0)
            loss2 += ((scr[:, 0] - 1 + label) * (scr[:, 0] - 1 +  label)).sum(dim=0)

        loss1 = loss1 / len(req)
        loss2 = loss2 / len(req)

        # backward
        # Fake-batch and real-batch losses get separate optimizer steps.
        self.optimGen.zero_grad()
        self.optimScr.zero_grad()
        loss1.backward(retain_graph=True)
        self.optimScr.step()

        self.optimGen.zero_grad()
        self.optimScr.zero_grad()
        loss2.backward()
        self.optimScr.step()

        # NOTE(review): adds 0-d numpy arrays into a torch scalar; works via
        # operator overloading but leaves a mixed-type accumulator.
        loss += loss1.detach().numpy() + loss2.detach().numpy()
        

        # Train Fluency Score
        print('Flu...')
        loss1 = T.tensor(0.0)
        loss2 = T.tensor(0.0)
        for step in range(len(req)):
            #print("scr %d" % step)
            
            Req = T.tensor([req[step]])
            reqEmb = self.embedding(Req)
            Res = T.tensor([res[step]])
            resEmb = self.embedding(Res)
            loop = random.randint(1, 6)
            # NOTE(review): two likely bugs on the next line. (1) len(Res) is
            # the batch dimension (always 1), so the repeat factor
            # (len(Res)-2)//loop+1 is 0 and Noise collapses to [[1, 2]] —
            # Res.shape[1] was probably intended. (2) random.randint's upper
            # bound is inclusive, so self.outDim can be drawn, which is out of
            # range for the embedding (valid ids 0..outDim-1) — outDim-1 was
            # probably intended. Left unchanged here; fix deliberately.
            Noise = T.tensor([[1] + [random.randint(5, self.outDim) for i in range(loop)]  * ((len(Res) - 2) // loop + 1) + [2]])
            noiseEmb = self.embedding(Noise)

            # swap two random positions of the real response to create a
            # "slightly disfluent" sample
            l = [i for i in range(0, resEmb.shape[1])]
            rand1, rand2 = random.sample(l, 2)
            l[rand1], l[rand2] = l[rand2], l[rand1]
            swapResEmb = resEmb[:, l, :]  #.clone().detach().requires_grad_(True)

            SD = 0.1
            noise = 0.01
            _, genEmb = self.eval(req[step], True, self.width if limited else None, True)

            # Graded fluency targets: random noise ~0, swapped ~0.3,
            # generated ~0.8, real ~1.0 (each with Gaussian label noise).
            scr, _ = self.scrFluModel(noiseEmb + (T.randn(noiseEmb.shape) * noise))
            scr = scr.exp()
            label = 0 + T.randn(1) * SD
            loss1 += ((scr[:, 1] - label) * (scr[:, 1] -  label)).sum(dim=0)
            loss1 += ((scr[:, 0] - 1 + label) * (scr[:, 0] - 1 +  label)).sum(dim=0)

            scr, _ = self.scrFluModel(swapResEmb + (T.randn(swapResEmb.shape) * noise))
            scr = scr.exp()
            label = 0.3 + T.randn(1) * SD
            loss1 += ((scr[:, 1] - label) * (scr[:, 1] - label)).sum(dim=0)
            loss1 += ((scr[:, 0] - 1 + label) * (scr[:, 0] - 1 +  label)).sum(dim=0)

            scr, _ = self.scrFluModel(genEmb + (T.randn(genEmb.shape) * noise))
            scr = scr.exp()
            label = 0.8 + T.randn(1) * SD
            loss2 += ((scr[:, 1] - label) * (scr[:, 1] - label)).sum(dim=0)
            loss2 += ((scr[:, 0] - 1 + label) * (scr[:, 0] - 1 +  label)).sum(dim=0)

            scr, _ = self.scrFluModel(resEmb + (T.randn(resEmb.shape) * noise))
            scr = scr.exp()
            label = 1.0 + T.randn(1) * SD
            loss2 += ((scr[:, 1] - label) * (scr[:, 1] - label)).sum(dim=0)
            loss2 += ((scr[:, 0] - 1 + label) * (scr[:, 0] - 1 +  label)).sum(dim=0)

        loss1 = loss1 / len(req) / 2
        loss2 = loss2 / len(req) / 2
        
        # backward
        self.optimGen.zero_grad()
        self.optimScr.zero_grad()
        loss1.backward(retain_graph=True)
        self.optimScr.step()

        self.optimGen.zero_grad()
        self.optimScr.zero_grad()
        loss2.backward()
        self.optimScr.step()

        self.embedding.eval()
        self.reqModel.eval()
        self.resModel.eval()
        self.scrCorModel.eval()
        self.scrFluModel.eval()

        loss += loss1.detach().numpy() + loss2.detach().numpy()

        return loss

    def score(self, req,  outEmb):
            """Critic scores for a raw request token list and an already
            embedded response: (correlation probs, fluency probs)."""
            req = [[1] + req + [2]]
            req = T.tensor(req)
            reqEmb = self.embedding(req)
            return self.scrCorModel(reqEmb, outEmb)[0].exp(),  self.scrFluModel(outEmb)[0].exp()

    def eval(self, req, rand = True, limit = None, train = False):
        """Greedily decode a response for the raw token list *req*.

        Args:
            rand: forwarded to ReqModel — random initial encoder state if True.
            limit: max number of generated tokens (None = unbounded; the
                `limit == None` test below would read better as `is None`).
            train: if True, also build a differentiable "soft" embedding
                sequence (softmax-weighted rows of embeddingArray at
                temperature self.tau) for the critics.

        Returns:
            token list, or (token list, soft-embedding tensor) when train.
        """
        req = [[1] + req + [2]]
        req = T.tensor(req)
        res = T.tensor([[1]])

        # forward
        reqEmb = self.embedding(req)
        reqOut1, reqOut2, hc1, hc2 = self.reqModel(reqEmb, rand)

        s = []
        # The soft sequence starts with the <start> embedding row.
        t = [self.embeddingArray[1].reshape(1, 1,  -1)]
        cnt = 0
        while limit == None or cnt < limit:
            #if train:
            if False:  # dead branch: soft feedback of t[-1] is disabled
                resEmb = t[-1]
            else:
                resEmb = self.embedding(res)
            
            out, hc1, hc2 = self.resModel(resEmb, reqOut1, reqOut2, hc1, hc2, self.tau if train else 1)
            out = out.exp()

            ch = int(out[0, -1, :].argmax(dim=0).detach().numpy())  # should be a single scalar token id
            if train:
                # Soft (differentiable) embedding of this step's distribution.
                t.append(out[:, -1:, :] @  self.embeddingArray)
            if ch == 2:
                break
            
            s.append(ch)
            res = T.tensor([[ch]])
            cnt += 1
        
        # Close the soft sequence with the <end> embedding (appended even if
        # the loop stopped at the length limit rather than on <end>).
        t.append(self.embeddingArray[2].reshape(1, 1,  -1))
        if train:
            emb = T.cat(t, dim=1)
            return s, emb
        else:
            return s
