import contextlib
import logging

import sacrebleu
import torch
from tqdm import tqdm

import config
from beam_decoder import beam_search
from model import batch_greedy_decode
from utils import chinese_tokenizer_load


def run_epoch(dataloader, model, loss_compute, optimizer=None):
    """Run one full pass over ``dataloader`` and return the per-token loss.

    Args:
        dataloader: iterable of batches exposing ``src``, ``tgt``, ``src_mask``,
            ``tgt_mask``, ``tgt_y`` and ``ntokens`` attributes.
        model: seq2seq model called as ``model(src, tgt, src_mask, tgt_mask)``.
        loss_compute: callable ``(logits_2d, targets_1d) -> scalar loss tensor``.
        optimizer: if given, performs a gradient step per batch (training mode);
            if ``None``, the pass is evaluation-only and runs under
            ``torch.no_grad()`` so no autograd graph is built.

    Returns:
        float: total loss divided by total target tokens.
    """
    total_tokens = 0.0
    total_loss = 0.0

    # Fix: without no_grad() the evaluation pass used to build (and keep)
    # the autograd graph for every batch, wasting memory.
    grad_ctx = contextlib.nullcontext() if optimizer is not None else torch.no_grad()
    with grad_ctx:
        for batch in tqdm(dataloader):
            out = model(batch.src, batch.tgt, batch.src_mask, batch.tgt_mask)
            # Flatten to (batch*seq, vocab) vs (batch*seq,) for the criterion.
            out_tmp = out.reshape(-1, out.shape[-1])
            tgt_y_tmp = batch.tgt_y.reshape(-1)
            loss = loss_compute(out_tmp, tgt_y_tmp)

            total_loss += loss.item()
            # float() keeps the running count (and the returned average) a
            # plain Python float even when ntokens is a tensor.
            total_tokens += float(batch.ntokens)
            if optimizer is not None:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
    return total_loss / total_tokens


def train(train_dataloader, valid_dataloader, model, criterion, optimizer):
    """Train the model and keep the checkpoint with the best validation BLEU.

    For each epoch: one training pass, one validation pass, and a greedy-decode
    BLEU evaluation on the validation set. Whenever the validation BLEU improves
    on the best seen so far, the model weights are saved to ``config.model_path``.

    Returns:
        tuple: (train_loss_list, valid_loss_list, valid_bleu_list) — per-epoch
        history of training loss, validation loss and validation BLEU.
    """
    # Best validation BLEU observed so far (higher is better).
    best_bleu_score = 0.0
    train_loss_list, valid_loss_list = [], []
    valid_bleu_list = []
    for epoch in range(1, config.epoch_num + 1):
        # --- training pass ---
        model.train()
        train_loss = run_epoch(train_dataloader, model, criterion, optimizer)
        train_loss_list.append(train_loss)
        logging.info("Epoch: {}, loss: {}".format(epoch, train_loss))

        # --- validation pass ---
        model.eval()
        valid_loss = run_epoch(valid_dataloader, model, criterion)
        valid_loss_list.append(valid_loss)
        # Greedy decoding here for speed; beam search is reserved for testing.
        bleu_score = evaluate(valid_dataloader, model, use_beam=False)
        valid_bleu_list.append(bleu_score)
        logging.info("Epoch: {}, Valid loss: {}, Bleu Score: {}".format(epoch, valid_loss, bleu_score))

        # Save the current model whenever it beats the best validation BLEU.
        if bleu_score > best_bleu_score:
            torch.save(model.state_dict(), config.model_path)
            best_bleu_score = bleu_score
            logging.info("-------- Save Best Model! --------")
    return train_loss_list, valid_loss_list, valid_bleu_list


class LossCompute:
    """Compute the criterion loss through the generator and, when an optimizer
    is supplied, run the backward pass and one parameter update per call."""

    def __init__(self, generator, criterion, opt=None):
        # Projects decoder hidden states to vocabulary scores.
        self.generator = generator
        # Loss over (N, vocab) predictions vs (N,) target ids.
        self.criterion = criterion
        # Optimizer, or a Noam-style wrapper when config.use_noamopt is set;
        # None means "compute loss only, no parameter update".
        self.opt = opt

    def __call__(self, x, y, norm):
        """Return the un-normalized total loss for the batch.

        Args:
            x: decoder output, any shape ending in the model dimension.
            y: target token ids, broadcast-compatible with ``x``'s leading dims.
            norm: tensor normalizer (typically the number of target tokens).

        Returns:
            0-dim tensor: ``(criterion / norm).item() * norm.float()``.
        """
        x = self.generator(x)
        loss = self.criterion(x.contiguous().view(-1, x.size(-1)), y.contiguous().view(-1)) / norm
        if self.opt is not None:
            # Fix: backward() used to run unconditionally, so evaluation calls
            # (opt=None) silently accumulated gradients on the model.
            loss.backward()
            self.opt.step()
            if config.use_noamopt:
                # Noam wrapper: zero grads on the wrapped optimizer.
                self.opt.optimizer.zero_grad()
            else:
                self.opt.zero_grad()
        return loss.item() * norm.float()


def evaluate(dataloader, model, mode="valid", use_beam=True):
    """Decode every batch in ``dataloader`` and return the corpus BLEU score.

    Args:
        dataloader: batches exposing ``src`` (token ids, 0 = padding) and
            ``tgt_text`` (reference Chinese sentences).
        model: trained seq2seq model.
        mode: "valid" or "test"; in "test" mode hypotheses are also written
            to ``config.output_path``.
        use_beam: beam search if True, otherwise batch greedy decoding.

    Returns:
        float: sacreBLEU corpus score computed with Chinese tokenization.
    """
    tokenizer_zh = chinese_tokenizer_load()
    trg = []  # reference sentences
    res = []  # model hypotheses
    with torch.no_grad():
        for batch in tqdm(dataloader):
            # Reference Chinese sentences for this batch.
            cn_sent = batch.tgt_text
            src = batch.src
            # Mask out padding positions (id 0).
            src_mask = (src != 0).unsqueeze(-2)
            if use_beam:
                decode_result, _ = beam_search(
                    model,
                    src,
                    src_mask,
                    config.max_len,
                    config.padding_idx,
                    config.bos_idx,
                    config.eos_idx,
                    config.beam_size,
                    config.device,
                )
            else:
                decode_result = batch_greedy_decode(model, src, src_mask, max_len=config.max_len)
            # NOTE(review): h[0] assumes each entry is a list of hypotheses and
            # keeps the best one. `translate` applies this only on the beam
            # branch — confirm batch_greedy_decode's return shape matches.
            decode_result = [h[0] for h in decode_result]
            translation = [tokenizer_zh.convert_ids_to_tokens(_s) for _s in decode_result]
            trg.extend(cn_sent)
            res.extend(translation)
    if mode == "test":
        # Fix: write UTF-8 explicitly — the platform default codec may be
        # unable to encode Chinese text.
        with open(config.output_path, "w", encoding="utf-8") as fp:
            for i in range(len(trg)):
                line = "idx:" + str(i) + trg[i] + "|||" + res[i] + "\n"
                fp.write(line)
    # sacrebleu expects a list of reference streams.
    trg = [trg]
    bleu = sacrebleu.corpus_bleu(res, trg, tokenize="zh")
    return float(bleu.score)


def test(dataloader, model, criterion):
    """Restore the best checkpoint, score the test set, and return its BLEU.

    Loads weights from ``config.model_path``, computes the test loss with
    ``run_epoch`` and the BLEU score with greedy decoding, logging both.
    """
    with torch.no_grad():
        # Restore the best weights saved during training.
        model.load_state_dict(torch.load(config.model_path))
        model.eval()
        # Score the test set: per-token loss, then greedy-decode BLEU.
        test_loss = run_epoch(dataloader, model, criterion)
        bleu_score = evaluate(dataloader, model, "test", use_beam=False)
        logging.info("Test loss: {},  Bleu Score: {}".format(test_loss, bleu_score))
    return bleu_score


def translate(src, model, use_beam=True):
    """Translate a single encoded source sentence with the trained model and
    print the decoded result."""
    sp_chn = chinese_tokenizer_load()
    with torch.no_grad():
        model.load_state_dict(torch.load(config.model_path))
        model.eval()
        # Mask out padding positions (id 0).
        src_mask = (src != 0).unsqueeze(-2)
        if not use_beam:
            decode_result = batch_greedy_decode(model, src, src_mask, max_len=config.max_len)
        else:
            decode_result, _ = beam_search(
                model,
                src,
                src_mask,
                config.max_len,
                config.padding_idx,
                config.bos_idx,
                config.eos_idx,
                config.beam_size,
                config.device,
            )
            # Keep only the top hypothesis from each beam.
            decode_result = [hyp[0] for hyp in decode_result]
        translation = [sp_chn.decode_ids(ids) for ids in decode_result]
        print(translation[0])
