from models.seq2seq import Seq2seq
from utils.data.loader import data_loader, test_loader
import torch
import torch.nn.functional as F
from config import Config
from tqdm import tqdm
from torch import optim
from modules.wrappers.beam import BeamSearchWrapper
from modules.wrappers.sample import SampleDecoderWrapper

# Pin all subsequent CUDA work in this process to the configured GPU.
if torch.cuda.is_available():
    torch.cuda.set_device(Config.DEVICE_ID)


class Translator:
    """Evaluation-time wrapper around a trained Seq2seq model.

    Computes the teacher-forced NLL loss per batch and decodes free-running
    predictions, optionally replacing predicted <unk> tokens with the source
    token that received the highest attention weight at that step.
    """

    def __init__(self, model, replace_unk=True):
        # model: trained Seq2seq exposing .encoder and .decoder sub-modules.
        # replace_unk: when True, resolve <unk> outputs via attention weights.
        self.model = model
        self.replace_unk = replace_unk
        self.wrapped_decoder = SampleDecoderWrapper(self.model.decoder)
        # Swap in BeamSearchWrapper to decode with beam search instead:
        # self.wrapped_decoder = BeamSearchWrapper(self.model.decoder)

    def translate_batch(self,
                        src_batch,
                        src_lengths,
                        tgt_batch,
                        tgt_lengths):
        """Return (predicted token sequences, mean NLL loss) for one batch."""

        with torch.no_grad():
            # Teacher-forced pass to measure loss against the references.
            encoder_outputs, final_hidden_state, final_cell_state = self.model.encoder(src_batch, src_lengths)
            # BUG FIX: feed tgt[:, :-1] (BOS-prefixed) and score against
            # tgt[:, 1:], mirroring Trainer.train_epoch. The original fed
            # tgt[:, 1:] and scored against tgt[:, :-1], i.e. each step was
            # scored on the token BEFORE its input — a backwards alignment.
            dist, _ = self.model.decoder(src_batch=src_batch,
                                         src_lengths=src_lengths,
                                         tgt_batch=tgt_batch[:, :-1],
                                         final_encoder_hidden_state=final_hidden_state,
                                         final_encoder_cell_state=final_cell_state,
                                         encoder_outputs=encoder_outputs)
            targets = tgt_batch[:, 1:].reshape(-1)
            dist = dist.view(-1, self.model.decoder.vocab_size)
            # nll_loss expects log-probabilities; dist holds probabilities.
            loss = F.nll_loss(dist.log(), targets,
                              ignore_index=self.model.decoder.vocab[Config.PAD_TOKEN],
                              reduction='mean')

            # Free-running decode for the actual predictions.
            predicts, score, attn_history = self.wrapped_decoder.decode(final_hidden_state,
                                                                        final_cell_state,
                                                                        encoder_outputs,
                                                                        src_batch,
                                                                        src_lengths)

        predicts = self.ids2words(self.model.decoder.vocab, predicts, src_batch, attn_history)
        return predicts, loss

    def ids2words(self, vocab, predicts, src_batch, alignments):
        """Convert predicted id sequences to token strings.

        Trims a single trailing EOS and, when replace_unk is set, maps each
        <unk> back to the most-attended source token at that decoding step.
        """
        eos_id = vocab[Config.EOS_TOKEN]
        unk_id = vocab[Config.UNK_TOKEN]
        # Strip a trailing EOS; the len() guard keeps empty predictions from
        # raising (s[-1] on an empty sequence crashed the original).
        word_ids = [s[:-1] if len(s) and s[-1] == eos_id else s for s in predicts]

        if not self.replace_unk:
            return [[vocab.get_itos()[w] for w in s] for s in word_ids]
        # a[i].argmax() picks the source position with the highest attention
        # weight at step i; rs is the raw source sequence for this sample.
        return [[vocab.get_itos()[w] if w != unk_id else rs[a[i].argmax()]
                 for i, w in enumerate(s)] for s, rs, a in zip(word_ids, src_batch, alignments)]

    def eval(self, data):
        """Translate every batch in `data`; return per-batch loss/BLEU dicts."""
        self.model.eval()
        results = []
        special_tokens = self.model.decoder.vocab.lookup_indices(
            [Config.EOS_TOKEN, Config.BOS_TOKEN, Config.UNK_TOKEN, Config.PAD_TOKEN])
        pbar = tqdm(data, desc='Translating...')
        for batch in pbar:
            src_batch = batch['methods']
            src_lengths = torch.Tensor(batch["src_methods_len"]).to(torch.int)
            tgt_batch = batch['summaries']
            # Reference summaries as tokens, with BOS/EOS/UNK/PAD stripped.
            raw_batch = [self.model.decoder.vocab.lookup_tokens([i for i in tgt[1:] if i not in special_tokens]) for tgt
                         in tgt_batch]
            tgt_lengths = torch.Tensor(batch["src_summaries_len"]).to(torch.int)
            predicts, loss = self.translate_batch(src_batch, src_lengths, tgt_batch, tgt_lengths)
            torch.cuda.empty_cache()
            # NOTE(review): batch_bleu is not imported in this file — confirm
            # it is available at runtime, otherwise this raises NameError.
            reports = {'loss': loss.item(), 'bleu': batch_bleu(predicts, raw_batch) * 100}
            results.append(reports)

        return results


class Trainer:
    """Epoch-based training loop with per-epoch checkpointing.

    Supports resuming: load_states() restores model/optimizer state and the
    epoch counter, and run_train() continues from cur_epoch.
    """

    def __init__(self, model, name, optimizer, dataloader, testloader, num_epochs, save_per_epoch=True):
        # model: the Seq2seq module to train.
        # name: used for the checkpoint directory and file names.
        self.model = model
        self.model_name = name
        self.optimizer = optimizer
        self.dataloader = dataloader
        self.testloader = testloader
        self.num_epochs = num_epochs
        self.cur_epoch = 0   # advanced by run_train(), restored by load_states()
        self.iteration = 0   # global step counter across epochs
        self.save_path = Config.BASE_DIR / 'resource' / 'model' / self.model_name
        self.save_per_epoch = save_per_epoch
        # self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, 0.9)

    def run_train(self):
        """Train from cur_epoch up to num_epochs, checkpointing each epoch."""
        self.model.train()
        for _ in range(self.cur_epoch, self.num_epochs):
            self.train_epoch()
            if self.save_per_epoch:
                self.save_states()
            self.cur_epoch += 1

    def train_epoch(self):
        """Run one teacher-forced pass over the training data."""
        dataloader = tqdm(self.dataloader)
        dataloader.set_description("Training epoch {0}:".format(self.cur_epoch))
        for batch in dataloader:
            self.optimizer.zero_grad()
            src_batch = batch['methods']
            src_lengths = torch.Tensor(batch["src_methods_len"]).to(torch.int)
            tgt_batch = batch['summaries']
            dist, attention = self.model(src_batch=src_batch,
                                         src_lengths=src_lengths,
                                         tgt_batch=tgt_batch)

            # Loss is computed between tgt_batch[:, 1:] and dist (the model
            # consumes the BOS-prefixed input and predicts the next token).
            tgt_batch = tgt_batch[:, 1:].reshape(-1)
            dist = dist.view(-1, dist.shape[-1])
            # dist : [batch_size, tgt_len, vocab_size] -> [batch_size * tgt_len, vocab_size]
            # tgt_batch : [batch_size, tgt_len]

            # nll_loss expects log-probabilities, hence dist.log().
            loss = F.nll_loss(dist.log(), tgt_batch, ignore_index=self.model.decoder.vocab[Config.PAD_TOKEN],
                              reduction='mean')
            loss.backward()
            # BUG FIX: clip THIS trainer's model parameters. The original
            # referenced the module-level global `mod`, which silently clips
            # the wrong parameters when a Trainer wraps a different model.
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5)
            self.optimizer.step()
            self.iteration += 1
            # self.scheduler.step()
            dataloader.set_postfix(loss="%.4f" % loss.item())

    def evaluation(self):
        """Evaluate on the test loader and print per-batch metrics."""
        self.model.eval()
        with torch.no_grad():
            translator = Translator(self.model, replace_unk=True)
            results = translator.eval(self.testloader)
            for each in results:
                print(each)

    def save_states(self):
        """Checkpoint model/optimizer state plus the current epoch index."""
        print('Saving model and settings...')
        # Create the checkpoint directory on first save; torch.save does not.
        self.save_path.mkdir(parents=True, exist_ok=True)
        checkpoint = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "epoch": self.cur_epoch}
        save_path = self.save_path / (self.model_name + "_epoch_{0}".format(self.cur_epoch) + ".pt")
        torch.save(checkpoint, save_path)

    def load_states(self, load_path):
        """Restore model/optimizer/epoch from a checkpoint produced by save_states."""
        print('Loading model and settings from checkpoint...')
        checkpoint = torch.load(load_path)
        self.model.load_state_dict(checkpoint['model'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.cur_epoch = checkpoint['epoch']


# Script entry point: build the model, move it to the configured device,
# and launch training. NOTE(review): this runs on import as well — consider
# guarding with `if __name__ == "__main__":` if the module is ever imported.
mod = Seq2seq()
mod.to(Config.DEVICE)
# print(mod)
trainer = Trainer(model=mod,
                  name='seq2seq',
                  optimizer=optim.Adam(mod.parameters(), lr=Config.LR),
                  dataloader=data_loader,
                  testloader=test_loader,
                  num_epochs=100)
trainer.run_train()
# trainer.evaluation()
