import os
os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
import mxnet.autograd as ag
import mxnet as mx
import numpy as np
import mxnet.ndarray as nd
import logging
import tqdm
import gluoncv, cv2
from  mxnet.gluon.data import DataLoader
from lib.data.latex import latex_dataset
import mxnet.gluon.nn as nn
def str_sum(l):
    """Concatenate the elements of *l* with ``+``, left to right.

    Used in this file to join a list of token strings into one sentence.

    Fixes over the original:
      * an empty list returns ``""`` instead of raising IndexError;
      * accumulation uses ``total = total + item`` rather than ``+=`` on
        ``l[0]``, so a mutable first element (e.g. a list) is no longer
        mutated in place as a side effect.
    """
    if not l:
        return ""
    total = l[0]
    for item in l[1:]:
        total = total + item
    return total
def demo():
    """Decode training samples with a pretrained net and print the results.

    Runs one sample at a time (batch_size=1), takes the per-step argmax of
    the softmax output, drops "blank" symbols, and prints the joined tokens.
    """
    LOG_DIR = "./output/latex"
    logging.basicConfig(level=logging.INFO)
    gpu_ids = [2]
    ctx_list = [mx.gpu(x) for x in gpu_ids]
    dataset = latex_dataset()
    train_loader =  DataLoader(dataset=dataset, batch_size=1, shuffle=True, num_workers=16,last_batch="discard")
    # NOTE(review): Resnet_CTC is neither defined nor imported in this file;
    # demo() raises NameError unless it is provided elsewhere — confirm.
    net = Resnet_CTC(alphabet_size = dataset.words_count,sequence_len=dataset.max_len)
    net.collect_params().reset_ctx(ctx=ctx_list)
    net.collect_params().load("output/latex/weights-7-158-[5284.6571].params")
    for nbatch,batch in enumerate(train_loader):
        batch = [mx.nd.array(x, ctx=ctx_list[0]) for x in batch]
        batch_img,batch_label = batch
        logits = net(batch_img)
        logits_softmax = mx.nd.softmax(logits,axis=2)
        # The whole decoding below assumes a single-sample batch.
        assert logits_softmax.shape[0]==1
        logits_softmax = mx.nd.squeeze(logits_softmax)
        logits_softmax_argmax = mx.nd.argmax(logits_softmax,axis=1).asnumpy()
        # The last vocabulary index is used as the "blank" symbol and is
        # filtered out of the printed sentence.
        dataset.index2words[dataset.words_count-1] = "blank"
        sentence = list(filter(lambda x:dataset.index2words[x] != "blank",logits_softmax_argmax))
        sentence = [ dataset.index2words[index] for index in  sentence]
        # sentence = [ index for index in  logits_softmax_argmax]
        print(str_sum(sentence))
class Conv(nn.HybridBlock):
    """A 'same'-padded square convolution, optionally followed by batch norm.

    Note the order: the activation is applied inside the Conv2D layer, so
    batch norm (when enabled) runs *after* the activation.
    """
    def __init__(self,norm = False,channels = 64,ks = 3,activation = "relu"):
        super(Conv,self).__init__()
        pad = (ks - 1) // 2  # keeps spatial size unchanged for odd ks
        self.conv = nn.Conv2D(channels=channels, kernel_size=(ks, ks),
                              padding=(pad, pad), activation=activation)
        self.conv.initialize()
        self._norm = norm
        if self._norm:
            self.bn = nn.BatchNorm()
            self.bn.initialize()

    def hybrid_forward(self, F, x, *args, **kwargs):
        out = self.conv(x)
        return self.bn(out) if self._norm else out

class Lenet(mx.gluon.Block):
    """A plain stack of seven batch-normalised Conv blocks."""
    def __init__(self):
        super(Lenet,self).__init__()
        self.feature = nn.Sequential()
        # Channel widths of the successive Conv blocks.
        channel_plan = (64, 128, 128, 256, 256, 512, 512)
        with self.feature.name_scope():
            for width in channel_plan:
                self.feature.add(Conv(True, width))
        self.initialize(init=mx.init.Xavier())

    def forward(self, x):
        return self.feature(x)
class FeatureExtracter(nn.Block):
    """Dilated ResNet18-v1b backbone used as a convolutional feature extractor.

    The pretrained classifier head is kept but frozen; forward() bypasses it
    entirely and returns the layer4 feature map.
    """
    def __init__(self):
        super(FeatureExtracter,self).__init__()
        self.features = gluoncv.model_zoo.resnet18_v1b(pretrained=True, dilated=True)
        # Freeze the (unused) fully-connected head so it receives no gradients.
        for param in (self.features.fc.weight, self.features.fc.bias):
            param.grad_req = 'null'

    def forward(self, x):
        net = self.features
        x = net.maxpool(net.relu(net.bn1(net.conv1(x))))
        for stage in (net.layer1, net.layer2, net.layer3, net.layer4):
            x = stage(x)
        return x
class Encoder(nn.Block):
    """Single-layer GRU that scans the CNN feature map right-to-left.

    Output step k corresponds to image column W-1-k, i.e. the sequence is
    emitted in processing (reversed) order, together with the final state.
    """
    def __init__(self):
        super(Encoder,self).__init__()
        self.gru = mx.gluon.rnn.GRUCell(hidden_size = 256)
        self.gru.initialize()

    def forward(self, feature):
        # Single-sample batches only.
        assert feature.shape[0]==1
        # n*c*h*w -> n*w*(c*h): one flattened vector per image column.
        feature = nd.reshape(nd.transpose(feature, (0, 3, 1, 2)), (0, 0, -1))
        width = feature.shape[1]
        states = self.gru.begin_state(batch_size=feature.shape[0], ctx=feature.context)
        steps = []
        for col in reversed(range(width)):
            out, states = self.gru(feature[:, col, :], states)
            steps.append(nd.expand_dims(out, axis=1))  # nbatch*1*256
        return nd.concat(*steps, dim=1), states  # nbatch * max_len * 256
class AttnDecoderRNN(nn.Block):
    """Single-step attention decoder over a fixed-length encoder sequence.

    NOTE(review): despite its name, ``self.gru`` is an LSTMCell, so the
    ``hidden`` argument of forward() must be the LSTM state list.
    """
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=32):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.embedding = nn.Embedding(input_dim=self.output_size,output_dim=256,weight_initializer=mx.init.Orthogonal(rand_type = "normal"))

        self.dropout_p = dropout_p
        # max_length fixes the number of attention weights, so the encoder
        # sequence passed to forward() must have exactly this many steps.
        self.max_length = max_length

        self.attn = nn.Dense(self.max_length)
        self.attn_combine = nn.Dense(self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = mx.gluon.rnn.LSTMCell(self.hidden_size)
        self.out = nn.Dense(self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        # input: a single token index, embedded to shape (1, 1, 256).
        embedded = self.embedding(input).reshape((1, 1, -1))
        embedded = self.dropout(embedded)

        # Attention weights from the current embedding and previous state.
        attn_weights = nd.softmax(
            self.attn(nd.concat(*(embedded[0], hidden[0]), dim=1)), axis=1)
        # Weighted sum over the encoder steps: (1, max_length) . (max_length, 256).
        attn_applied = nd.dot(attn_weights,encoder_outputs[0])

        output = nd.concat(*(embedded[0], attn_applied), dim =1)
        output = self.attn_combine(output)

        output = nd.relu(output)
        output, hidden = self.gru(output, hidden)
        # Raw (unnormalised) vocabulary logits.
        output = self.out(output)
        # output = nd.log_softmax(self.out(output), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        # NOTE(review): returns one array, but LSTMCell expects a list of two
        # state arrays — unused by the callers visible in this file; confirm.
        return nd.zeros(shape = (1, 1, self.hidden_size))
class Decoder(nn.Block):
    """Greedy attention decoder: feeds its own argmax prediction back in.

    The labels are used only for their length (number of decode steps);
    cleanup over the original: the unused per-step ``word = label[iword]``
    local and a dead commented-out call were removed.
    """
    def __init__(self,words_count):
        super(Decoder,self).__init__()
        self.atten_decoder= AttnDecoderRNN(hidden_size=256,output_size=words_count)

    def forward(self, encoder_outputs, labels, encoder_hidden_states):
        # encoder_outputs: nbatch , image_width//stride, 256
        # labels: nbatch * max_len
        assert encoder_outputs.shape[0]==1
        assert labels is not None
        assert labels.shape[0]==1
        hidden_states = encoder_hidden_states
        label = labels[0]

        sentence = []
        # Start token: a zero index fed as the first decoder input.
        outputs = nd.zeros(shape=(1,1),ctx=label.context)
        for _ in range(label.shape[0]-1):
            outputs,hidden_states,weights = self.atten_decoder(outputs,hidden_states,encoder_outputs)
            sentence.append(outputs) # 1xwords_count
            # Feed back the greedy prediction; detach so no gradient flows
            # through the discrete argmax.
            outputs = nd.argmax(outputs,axis=1).reshape((1,1))
            outputs = outputs.detach()
        sentence = nd.concat(*sentence,dim = 0)
        sentence = sentence.expand_dims(axis = 0)
        return sentence
# class DecoderNoAtten(nn.Block):
#     def __init__(self,output_size):
#         super(DecoderNoAtten,self).__init__()
#         self.fc = nn.Dense(units=output_size)
#         self.gru = mx.gluon.rnn.GRUCell(hidden_size=output_size)
#         self.embedding = nn.Embedding(input_dim=output_size,output_dim=output_size,weight_initializer=mx.init.Orthogonal(rand_type = "normal"))
#         self.initialize()
#     def forward(self, feature,labels):
#         label = labels[0]
#         feat = nd.reshape(feature,(0,0,-1))
#         feat = nd.max(feat,axis=1)
#         feat = self.fc(feat) # 1 *1024
#         sentence = []
#         # outputs = self.embedding(nd.zeros(shape=(1,1),ctx=label.context))
#         hidden_states = [feat]
#         for iword in range(label.shape[0]-1):
#             word = label[iword]
#             outputs,hidden_states = self.gru(self.embedding(word),hidden_states)
#             sentence.append(outputs) # 1xwords_count
#             # outputs = nd.argmax(outputs,axis=1).reshape((1,1))
#             # outputs = outputs.detach()
#         sentence = nd.concat(*sentence,dim = 0)
#         sentence = sentence.expand_dims(axis = 0)
#         return sentence

class EncoderDecoder(nn.Block):
    """ResNet feature extractor + recurrent encoder + attention decoder.

    forward() randomly switches (6 out of 11 draws) between teacher forcing
    and free running, so training passes are stochastic.
    """
    def __init__(self,output_size):
        super(EncoderDecoder,self).__init__()
        self.feature = FeatureExtracter()
        # NOTE: despite the name, this is an LSTM cell — its state list is
        # handed directly to the LSTMCell inside AttnDecoderRNN.
        self.encoder_gru = mx.gluon.rnn.LSTMCell(hidden_size = 256)
        self.atten_decoder = AttnDecoderRNN(hidden_size=256, output_size=output_size)
        self.fc = nn.Dense(256,flatten=False)
        self.initialize(init=mx.init.Xavier())
    def forward(self, x,labels):
        feature = self.feature(x)
        # Single-sample batches only.
        assert feature.shape[0]==1
        feature = nd.transpose(feature,(0,3,1,2)) #n * w * c * h
        feature = nd.reshape(feature,(0,0,-1))
        # Project each flattened image column to the encoder width (256).
        feature = self.fc(feature)
        encoder_outputs = []
        hidden_states = self.encoder_gru.begin_state(batch_size=feature.shape[0],ctx = feature.context)
        # Left-to-right scan over image columns.
        for i in range(feature.shape[1]):
            encoder_output,hidden_states = self.encoder_gru(feature[:,i,:],hidden_states)
            encoder_outputs.append(nd.expand_dims(encoder_output,axis=1)) #nbatch*1*256
        encoder_outputs = nd.concat(*encoder_outputs,dim = 1) # nbatch * max_len * 256
        assert encoder_outputs.shape[0]==1

        from random import randint
        if randint(0,10) <= 5:
            # Teacher forcing: the decoder consumes the ground-truth tokens.
            encoder_output, label = encoder_outputs[0],labels[0]
            sentence = []
            # outputs = nd.zeros(shape=(1,1),ctx=label.context)
            for iword in range(label.shape[0]-1):
                word = label[iword]
                outputs,hidden_states,weights = self.atten_decoder(word.expand_dims(axis=0),hidden_states,encoder_outputs)
                # outputs,hidden_states,weights = self.atten_decoder(outputs,hidden_states,encoder_outputs)
                sentence.append(outputs) # 1xwords_count
                # outputs = nd.argmax(outputs,axis=1).reshape((1,1))
                # outputs = outputs.detach()
            sentence = nd.concat(*sentence,dim = 0)
            sentence = sentence.expand_dims(axis = 0)
            return sentence
        else:
            # Free running: feed back the greedy prediction, detached so the
            # discrete argmax carries no gradient.
            encoder_output, label = encoder_outputs[0],labels[0]
            sentence = []
            outputs = nd.zeros(shape=(1,1),ctx=label.context)
            for iword in range(label.shape[0]-1):
                outputs,hidden_states,weights = self.atten_decoder(outputs,hidden_states,encoder_outputs)
                sentence.append(outputs) # 1xwords_count
                outputs = nd.argmax(outputs,axis=1).reshape((1,1))
                outputs = outputs.detach()
            sentence = nd.concat(*sentence,dim = 0)
            sentence = sentence.expand_dims(axis = 0)
            return sentence

class EncoderDecoder2(nn.Block):
    """CNN feature -> global max pool -> GRU language model (teacher forcing).

    The pooled image feature initialises the GRU hidden state; the GRU is
    then driven by the embedded ground-truth tokens and ``fc2`` projects
    each step to vocabulary logits.

    Cleanup over the original: the unused ``from random import randint``
    inside forward() and the dead commented-out free-running branch were
    removed; the computation is unchanged.
    """
    def __init__(self,output_size):
        super(EncoderDecoder2,self).__init__()
        self.feature = FeatureExtracter()
        # Used as a decoder cell despite the name; kept for checkpoint
        # parameter-name compatibility.
        self.encoder_gru = mx.gluon.rnn.GRUCell(hidden_size = 2048)
        self.fc1 = nn.Dense(2048,flatten=False)
        self.fc2 = nn.Dense(output_size,flatten=False)

        self.embedding = nn.Embedding(input_dim=output_size, output_dim=2048,
                                      weight_initializer=mx.init.Orthogonal(rand_type="normal"))

        self.initialize(init=mx.init.Xavier())

    def forward(self, x,labels):
        feature = self.feature(x)
        # Single-sample batches only.
        assert feature.shape[0]==1
        feature = nd.reshape(feature,(0,0,-1)) # n * c * (h*w)
        feature = nd.max(feature,axis=2)       # global max pool -> n * c
        feature = self.fc1(feature)            # n * 2048
        # The pooled image feature becomes the initial recurrent state.
        hidden_states = [feature]
        label = labels[0]
        sentence = []
        # Teacher forcing: feed ground-truth token t to predict token t+1.
        for iword in range(label.shape[0]-1):
            word = label[iword]
            word_embedded = self.embedding(word.expand_dims(axis=0))[0]
            outputs,hidden_states = self.encoder_gru(word_embedded,hidden_states)
            sentence.append(self.fc2(outputs)) # 1xwords_count
        sentence = nd.concat(*sentence,dim = 0)
        sentence = sentence.expand_dims(axis = 0)
        return sentence

def main():
    """Train EncoderDecoder on latex_dataset with per-sample forward passes.

    Gradients are accumulated over ``bs`` single-sample passes (the decoder
    only supports batch size 1) and applied in one trainer step.
    """
    LOG_DIR = "./output/latex"
    bs = 100
    logging.basicConfig(level=logging.INFO)
    logging.info("bs:{}".format(bs))
    gpu_ids = [8]
    ctx_list = [mx.gpu(x) for x in gpu_ids]
    dataset = latex_dataset()
    encoderdecoder = EncoderDecoder(output_size=dataset.words_count)

    logging.info(dataset.words_count)

    encoderdecoder.collect_params().reset_ctx(ctx=ctx_list)

    cnn_params = dict(encoderdecoder.collect_params())

    base_lr = 1e-2
    trainer = mx.gluon.Trainer(cnn_params,
                               'SGD',
                               {'learning_rate': base_lr,
                                'wd': 5e-3,
                                'momentum': 0.9,
                                # 'clip_gradient': 1,
                                'multi_precision': True
                                },
                               )
    # NOTE(review): CTCLoss is applied to attention-decoder logits that are
    # already aligned step-for-step with the shifted labels — confirm CTC
    # (rather than the commented-out cross-entropy) is intended here.
    criterion  = mx.gluon.loss.CTCLoss()
    # criterion = mx.gluon.loss.SoftmaxCrossEntropyLoss(axis=2,batch_axis=0,from_logits=False)
    for nepoch in range(100):
        for nbatch in range(len(dataset)//bs -1 ):
            losses = []
            for i in range(bs):
                batch = dataset[nbatch * bs + i]
                image,label = batch
                image = nd.array(image[np.newaxis],ctx=mx.gpu(gpu_ids[0]))
                label = nd.array(label[np.newaxis],ctx=mx.gpu(gpu_ids[0]))
                with ag.record():
                    decoder_outputs = encoderdecoder(image,label)
                    # Prediction at step t is compared with the label at t+1.
                    loss = criterion(decoder_outputs[:,:(label.shape[1]-1)],label[:,1:])
                    loss = nd.sum(loss)
                    losses.append(loss)
                if i==0:
                    # Best-effort progress print of the first sample's
                    # decoding; failures here must not abort training.
                    try:
                        decoder_outputs = nd.argmax(decoder_outputs,axis=2)
                        print(decoder_outputs.shape)
                        print(str_sum([dataset.index2words[x] for x in decoder_outputs[0].asnumpy()]))
                        print(str_sum([dataset.index2words[x] for x in label[0,1:].asnumpy()]))
                    except Exception:
                        pass
            # One optimizer step per accumulated group of bs sample losses.
            ag.backward(losses)
            trainer.step(batch_size=len(losses))
            print(sum(losses).asscalar()/len(losses))
if __name__ == "__main__":
    main()