from typing import Any

import torch

from .trainer_utils import TrainerConfig, Trainer
from ..dataprocess.dataprocess_dial import Seq2SeqDialDataProcess
from ..models.modeling_seq2seq import Seq2SeqConfig, Seq2SeqLSTM
from ..tokenizer.tokenization_dial import Seq2SeqDialTokenizer


class Seq2SeqTrainerConfig(TrainerConfig):
    """Trainer configuration for the seq2seq LSTM dialogue model.

    Extends ``TrainerConfig`` with model- and tokenizer-specific settings.
    """

    def __init__(self,
                 vocab_size,
                 linear_size_list=None,
                 max_len=256,
                 label_pad_id=-100,
                 do_lower_case=True,
                 **kwargs):
        """
        Args:
            vocab_size: size of the model vocabulary.
            linear_size_list: hidden sizes of the model's linear layers;
                ``None`` means the default ``[128]``.
            max_len: maximum sequence length used by tokenizer/data process.
            label_pad_id: label id to ignore when computing accuracy.
            do_lower_case: whether the tokenizer lower-cases input text.
            **kwargs: forwarded to ``TrainerConfig`` (presumably includes
                dataset_dir, vocab_file, batch_size — confirm with base class).
        """
        self.vocab_size = vocab_size
        # Avoid the mutable-default-argument pitfall: a shared [128] list
        # could be mutated by one caller and leak into later instances.
        self.linear_size_list = [128] if linear_size_list is None else linear_size_list
        self.max_len = max_len
        self.label_pad_id = label_pad_id
        self.do_lower_case = do_lower_case
        super(Seq2SeqTrainerConfig, self).__init__(**kwargs)


class Seq2SeqTrainer(Trainer):
    """Trainer for the seq2seq LSTM dialogue model.

    Builds a default model, data process and tokenizer from ``trainer_config``
    whenever the caller does not supply them explicitly.
    """

    def __init__(self, trainer_config: Seq2SeqTrainerConfig,
                 model: Seq2SeqLSTM = None,
                 data_process: Seq2SeqDialDataProcess = None, **kwargs):
        """
        Args:
            trainer_config: configuration with vocab/model/data settings.
            model: optional pre-built model; a fresh ``Seq2SeqLSTM`` is
                created from the config when ``None``.
            data_process: optional data pipeline; built from the config's
                dataset/vocab paths when ``None``.
            **kwargs: forwarded to the base ``Trainer``.
        """
        if model is None:
            model_config = Seq2SeqConfig(vocab_size=trainer_config.vocab_size,
                                         linear_size_list=trainer_config.linear_size_list,
                                         )
            model = Seq2SeqLSTM(config=model_config)
        if data_process is None:
            data_process = Seq2SeqDialDataProcess(dataset_dir=trainer_config.dataset_dir,
                                                  vocab_path=trainer_config.vocab_file,
                                                  do_lower_case=trainer_config.do_lower_case,
                                                  max_len=trainer_config.max_len)
        self.config = trainer_config
        # Tokenizer is used by ``test`` to decode predictions back to text.
        self.tokenizer = Seq2SeqDialTokenizer(vocab_file=self.config.vocab_file,
                                              label_pad_id=self.config.label_pad_id,
                                              max_len=self.config.max_len,
                                              do_lower_case=self.config.do_lower_case,
                                              )
        super(Seq2SeqTrainer, self).__init__(trainer_config=trainer_config,
                                             model=model,
                                             data_process=data_process,
                                             **kwargs)

    def batch_acc(self, output: torch.Tensor, batch: Any, device: str) -> float:
        """Compute token-level accuracy for one batch.

        Positions whose label equals ``config.label_pad_id`` are ignored.

        Args:
            output: model logits with vocab size as the last dimension
                (assumed to flatten to one row per label token — TODO confirm
                against ``Seq2SeqLSTM``'s output shape).
            batch: a (src_ids, trg_ids, label) tensor triple.
            device: device string the tensors are moved to.

        Returns:
            Fraction of non-padding tokens predicted correctly, or 0.0 when
            the batch contains only padding.
        """
        _, _, label = [item.to(device) for item in batch]
        output = output.contiguous().view(-1, output.size(-1))
        label = label.contiguous().view(-1)
        _, output = output.max(dim=-1)
        non_pad_mask = label.ne(self.config.label_pad_id)
        n_correct = output.eq(label).masked_select(non_pad_mask).sum().item()
        n_tokens = non_pad_mask.sum().item()
        # Guard: an all-padding batch would otherwise raise ZeroDivisionError.
        return n_correct / n_tokens if n_tokens else 0.0

    def model_optimizer(self, model: Seq2SeqLSTM) -> torch.optim.Optimizer:
        """Return the training optimizer (Adam with default hyperparameters)."""
        optimizer = torch.optim.Adam(model.parameters())
        return optimizer

    def test(self, model: Seq2SeqLSTM, device: str) -> None:
        """Run a short qualitative evaluation and log decoded samples.

        Decodes up to ``test_step`` batches from the test split and writes
        tab-separated "label / prediction" pairs to the logger. A CUDA
        out-of-memory error during evaluation is logged and the cache cleared
        instead of aborting; any other RuntimeError is logged and re-raised.
        """
        test_dataloader = self.data_process.dataloader('test', batch_size=self.config.batch_size)
        test_step = 2  # only a couple of batches: this is a spot check, not a metric
        try:
            with torch.no_grad():
                for i, batch in enumerate(test_dataloader):
                    if i >= test_step:
                        break
                    src_ids, _, label = [item.to(device) for item in batch]
                    # training=False: presumably decodes without teacher
                    # forcing — confirm against Seq2SeqLSTM.forward.
                    logits = model(x=src_ids, label=label, training=False)

                    _, pre_label = logits.max(dim=-1)
                    pre_label = pre_label.cpu().numpy().tolist()
                    # Decoding stops at the separator token.
                    stop_token_id = self.tokenizer.sep_token_id
                    pre_tokens = self.tokenizer.decode(pre_label, skip_special_tokens=True, stop_token_id=stop_token_id)

                    label = label.cpu().numpy().tolist()
                    label_tokens = self.tokenizer.decode(label, skip_special_tokens=True, stop_token_id=stop_token_id)

                    for l, p in zip(label_tokens, pre_tokens):
                        self.logger.info(f"{l}\t{p}")
        except RuntimeError as exception:
            if "out of memory" in str(exception):
                self.logger.info("WARNING: val out of memory")
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
            else:
                self.logger.info(str(exception))
                raise exception

    def model_forward(self, model: Seq2SeqLSTM, batch: Any, device: str) -> 'tuple[torch.Tensor, torch.Tensor]':
        """Run one training step.

        Args:
            model: the seq2seq model; called with ``training=True``.
            batch: a (src_ids, trg_ids, label) tensor triple.
            device: device string the tensors are moved to.

        Returns:
            ``(loss, output)`` as produced by the model's forward pass.
        """
        src_ids, trg_ids, label = [item.to(device) for item in batch]
        loss, output = model(x=[src_ids, trg_ids], label=label, training=True)
        return loss, output


def train_sort_for_seq2seq(train_flag='20220918_sort_seq2seq'):
    """Train the seq2seq LSTM on the d101 sorting dataset."""
    config = Seq2SeqTrainerConfig(
        train_flag=train_flag,
        vocab_size=67,
        vocab_file="data/dataset/d101_sort/vocab.txt",
        dataset_dir="data/dataset/d101_sort",
        linear_size_list=[512],
        max_len=256,
        log_n_step=100,
        do_lower_case=False,
    )
    Seq2SeqTrainer(trainer_config=config).train()









