import logging

import numpy as np
import torch
from torch.utils.data import DataLoader

import config
import utils
from data_loader import pre_news_commentary
from model import make_model
from train import test, train, translate
from utils import english_tokenizer_load, set_seed, draw_training


class NoamOpt:
    """Noam learning-rate schedule wrapped around a torch optimizer.

    The rate grows linearly for ``warmup`` steps, then decays with the
    inverse square root of the step count, scaled by ``factor`` and by
    ``model_size ** -0.5`` (the schedule from "Attention Is All You Need").
    """

    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0

    def step(self):
        """Advance one step: refresh every param group's lr, then step the optimizer."""
        self._step += 1
        lr = self.rate()
        for group in self.optimizer.param_groups:
            group["lr"] = lr
        self._rate = lr
        self.optimizer.step()

    def rate(self, step=None):
        """Return the learning rate at ``step`` (defaults to the current step)."""
        current = self._step if step is None else step
        warmup_slope = current * self.warmup ** (-1.5)  # linear warm-up branch
        decay = current ** (-0.5)                       # inverse-sqrt decay branch
        return self.factor * (self.model_size ** (-0.5) * min(decay, warmup_slope))


def get_std_opt(model):
    """Build the standard Noam optimizer for ``model``.

    Uses factor 1 and 10000 warm-up steps; for batch_size 32 (~5530 steps
    per epoch) that is roughly two epochs of warm-up.
    """
    base_optimizer = torch.optim.Adam(
        model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9
    )
    return NoamOpt(model.src_embed[0].d_model, 1, 10000, base_optimizer)


def run():
    """End-to-end pipeline: build loaders, train the model, report BLEU on all splits."""
    utils.set_logger(config.log_path)

    train_dataset, valid_dataset, test_dataset = pre_news_commentary(config.new_commentary_path)
    logging.info("-------- Dataset Build! --------")

    def _make_loader(dataset, shuffle):
        # All three splits share the batch size; each dataset supplies its own collate_fn.
        return DataLoader(
            dataset,
            shuffle=shuffle,
            batch_size=config.batch_size,
            collate_fn=dataset.collate_fn,
        )

    train_dataloader = _make_loader(train_dataset, shuffle=True)
    valid_dataloader = _make_loader(valid_dataset, shuffle=False)
    test_dataloader = _make_loader(test_dataset, shuffle=False)
    logging.info("-------- Get Dataloader! --------")

    # Build the Transformer from the tokenizers' vocab sizes and pad ids.
    model = make_model(
        train_dataset.tokenizer_en.vocab_size,
        train_dataset.tokenizer_zh.vocab_size,
        train_dataset.src_pad_id,
        train_dataset.tgt_pad_id,
        config.n_layers,
        config.d_model,
        config.d_ff,
        config.n_heads,
        config.dropout,
    )

    # Summed cross-entropy, ignoring target padding positions.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=train_dataset.tgt_pad_id, reduction="sum")
    optimizer = torch.optim.AdamW(model.parameters(), lr=config.lr)

    train_loss_list, valid_loss_list, valid_bleu_list = train(
        train_dataloader, valid_dataloader, model, criterion, optimizer
    )
    draw_training(train_loss_list, valid_loss_list, "./figs/back_commentary.png")

    # Final BLEU evaluation on every split; also appended to the log file.
    print("Train BLEU Score:")
    train_bleu = test(train_dataloader, model, criterion)
    print("Valid BLEU Score:")
    valid_bleu = test(valid_dataloader, model, criterion)
    print("Test BLEU Score:")
    test_bleu = test(test_dataloader, model, criterion)
    with open(config.log_path, "a") as fp:
        fp.write("Train BLEU Score: {}\n".format(train_bleu))
        fp.write("Valid BLEU Score: {}\n".format(valid_bleu))
        fp.write("Test BLEU Score: {}\n".format(test_bleu))


def check_opt():
    """Plot how the Noam learning rate evolves for three model_size:warmup settings."""
    import matplotlib.pyplot as plt

    # Bug fix: make_model's signature (see run()) takes the src/tgt pad ids
    # before n_layers; they were omitted here, so every argument after the
    # vocab sizes landed in the wrong position.
    # NOTE(review): assumes config defines padding_idx (it is referenced in
    # the label-smoothing branch of run()) — confirm against config.
    model = make_model(
        config.src_vocab_size,
        config.tgt_vocab_size,
        config.padding_idx,
        config.padding_idx,
        config.n_layers,
        config.d_model,
        config.d_ff,
        config.n_heads,
        config.dropout,
    )
    opt = get_std_opt(model)
    # Three settings of the lrate hyperparameters (model_size:warmup).
    opts = [opt, NoamOpt(512, 1, 20000, None), NoamOpt(256, 1, 10000, None)]
    plt.plot(np.arange(1, 50000), [[o.rate(i) for o in opts] for i in range(1, 50000)])
    plt.legend(["512:10000", "512:20000", "256:10000"])
    plt.show()


def one_sentence_translate(sent, beam_search=True):
    """Translate one English sentence with a freshly built model.

    Args:
        sent: raw English source sentence.
        beam_search: use beam search when True, greedy decoding otherwise.
    """
    # Bug fix: make_model's signature (see run()) takes the src/tgt pad ids
    # before n_layers; they were omitted here, shifting the later arguments.
    # NOTE(review): assumes config defines padding_idx — confirm against config.
    model = make_model(
        config.src_vocab_size,
        config.tgt_vocab_size,
        config.padding_idx,
        config.padding_idx,
        config.n_layers,
        config.d_model,
        config.d_ff,
        config.n_heads,
        config.dropout,
    )
    # Load the tokenizer once instead of three separate times.
    tokenizer = english_tokenizer_load()
    bos = tokenizer.bos_id()  # 2
    eos = tokenizer.eos_id()  # 3
    src_tokens = [[bos] + tokenizer.EncodeAsIds(sent) + [eos]]
    batch_input = torch.LongTensor(np.array(src_tokens)).to(config.device)
    translate(batch_input, model, use_beam=beam_search)


def translate_example():
    """Demo: translate a single English sentence to Chinese via beam search."""
    # Reference translation: 近期的政策对策很明确：把最低工资提升到足以一个全职工人及其家庭免于贫困的水平，扩大对无子女劳动者的工资所得税减免。
    sent = (
        "The near-term policy remedies are clear: raise the minimum wage to a level that will keep a "
        "fully employed worker and his or her family out of poverty, and extend the earned-income tax credit "
        "to childless workers."
    )
    one_sentence_translate(sent, beam_search=True)


if __name__ == "__main__":
    import os
    import warnings

    # Pin the visible GPU before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # Give this experiment its own log / checkpoint / output file names.
    for attr, filename in (
        ("log_path", "commentary.log"),
        ("model_path", "commentary.pth"),
        ("output_path", "commentary.txt"),
    ):
        setattr(config, attr, os.path.join(getattr(config, attr), filename))

    warnings.filterwarnings("ignore")
    set_seed(10)
    run()
    # translate_example()
