import os.path

import torch
from tqdm.auto import tqdm
from transformers import BertTokenizer, get_linear_schedule_with_warmup, logging
from loguru import logger
from common.config import Config
from common.utils import seed_everything

logging.set_verbosity_error()


class Trainer:
    """Orchestrates dataset loading, model construction, training,
    evaluation, prediction and checkpointing for a BERT-based model.

    Call order matters: ``load_dataset`` must run before ``load_model`` /
    ``train``, because it writes ``num_labels`` / ``id2label`` / ``markup``
    onto ``config.model_config``.
    """

    def __init__(self, config: "Config"):
        # Project configuration bundle; expected to expose model_config,
        # dataset_class, dataloader_class, model_class and optimizer_config.
        self.config = config

    def load_dataset(self, mode, **kwargs):
        """Build the dataset and dataloader for ``mode``.

        Args:
            mode: one of 'train', 'dev' or 'test'.
            **kwargs: forwarded to the dataset constructor.

        Side effect: copies label metadata (num_labels, id2label, markup)
        from the dataset onto ``config.model_config`` so the model head can
        be sized afterwards.

        Returns:
            (dataset, dataloader) tuple.
        """
        tokenizer = BertTokenizer.from_pretrained(self.config.model_config.tokenizer_path)
        dataset = self.config.dataset_class(config=self.config, mode=mode, tokenizer=tokenizer, **kwargs)
        if mode == "test":
            # Prediction runs one example at a time, preserving input order.
            dataloader = self.config.dataloader_class(dataset, batch_size=1, shuffle=False)
        else:
            dataloader = self.config.dataloader_class(dataset, batch_size=self.config.model_config.batch_size)
        # Propagate label metadata so load_model() can build the classifier.
        self.config.model_config.num_labels = dataset.num_labels
        self.config.model_config.id2label = dataset.id2label
        self.config.model_config.markup = dataset.markup
        logger.info("The {} {} dataset has been loaded.".format(self.config.dataset, mode))
        return dataset, dataloader

    def load_model(self):
        """Construct the model; optionally restore weights from disk.

        Must be called after ``load_dataset`` (the model config needs the
        label metadata). If ``save_path`` / ``save_name`` / ``load_mode``
        are absent from the config, a freshly initialized model is returned.
        """
        model = self.config.model_class(self.config.model_config)
        try:
            model_file_path = os.path.join(self.config.model_config.save_path,
                                           self.config.model_config.save_name)
            load_mode = self.config.model_config.load_mode
        except AttributeError:
            # Config does not request checkpoint loading.
            model_file_path = None
            load_mode = None
        if model_file_path is not None and load_mode is not None:
            model.load_model(model_file_path, load_mode)
            logger.info("The {} model has been loaded.".format(self.config.model))
        else:
            logger.info("The {} model has been initialized.".format(self.config.model))
        return model

    def train(self, model, train_loader, dev_loader=None):
        """Run the full training loop and save the model at the end.

        Args:
            model: the model to train (built via ``load_model``).
            train_loader: training dataloader (from ``load_dataset``).
            dev_loader: required when ``args.evaluate_during_training`` is
                true; evaluation then runs after every epoch.

        Raises:
            ValueError: if per-epoch evaluation is requested without a
                dev_loader.
        """
        args = self.config.model_config
        if args.evaluate_during_training and dev_loader is None:
            # Explicit raise instead of assert: asserts vanish under -O.
            raise ValueError("evaluate_during_training requires a dev_loader")

        model.to(args.device)
        t_total = len(train_loader) * args.num_train_epochs
        # Prepare optimizer and schedule (linear warmup and decay):
        # parameters whose name matches any no_decay pattern get no weight decay.
        no_decay = args.no_decay
        logger.info("no_decay:{}".format(no_decay))
        optimizer_grouped_parameters = [
            {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
             "weight_decay": args.weight_decay},
            {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
             "weight_decay": 0.0},
        ]
        warmup_steps = int(t_total * args.warmup_proportion)
        optimizer_config = self.config.optimizer_config
        optimizer = optimizer_config.optimizer_class(optimizer_grouped_parameters, **optimizer_config.params)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
                                                    num_training_steps=t_total)
        # Resolve gradient clipping ONCE before the loop. The original code
        # converted args.max_grad_norm to float on every step and caught
        # KeyError — but a missing config attribute raises AttributeError,
        # and float() raises TypeError/ValueError, so clipping could crash
        # instead of being skipped.
        try:
            max_grad_norm = float(args.max_grad_norm)
        except (AttributeError, KeyError, TypeError, ValueError):
            max_grad_norm = None  # clipping disabled

        logger.info("***** Running training *****")
        model.zero_grad()
        seed_everything(args.seed)
        global_step = 0
        for epoch in tqdm(range(int(args.num_train_epochs)), position=0, leave=True):
            for batch in tqdm(train_loader, position=0, leave=True, miniters=3):
                model.train()
                batch = tuple(t.to(args.device) for t in batch)
                # Batch layout: (input_ids, attention_mask, token_type_ids, labels)
                # — note indices 1 and 2 are deliberately swapped into the kwargs.
                inputs = {"input_ids": batch[0], "token_type_ids": batch[2], "attention_mask": batch[1],
                          "labels": batch[3]}
                outputs = model(**inputs)
                loss = outputs[0]
                loss.backward()
                if max_grad_norm is not None:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                if global_step % args.logging_steps == 0:
                    logger.info("Epoch:{}\tStep:\t{}/{}\tLoss:{}".format(epoch, global_step, t_total, loss.item()))
                global_step += 1
            if args.evaluate_during_training:
                self.evalutate(model, dev_loader, epoch=epoch)
            if 'cuda' in str(args.device):
                # Free cached GPU memory between epochs.
                torch.cuda.empty_cache()
        logger.info("***** Training Finished *****")
        # TODO: refine model-saving logic (e.g. keep only the best checkpoint).
        self.save_model(model)

    def evaluate(self, model, dev_dataloader, epoch=None):
        """Evaluate on the dev set.

        Args:
            model: model exposing an ``evaluate(dataloader)`` method.
            dev_dataloader: dev-set dataloader.
            epoch: accepted for call-site symmetry; currently unused.

        Returns:
            (eval_loss, eval_info) as produced by ``model.evaluate``.
        """
        seed_everything(self.config.model_config.seed)
        model.to(self.config.model_config.device)
        eval_loss, eval_info = model.evaluate(dev_dataloader)
        logger.info("***** Eval results *****")
        # Use the logger (not print) so results land in the same sink
        # as the rest of the training log.
        logger.info(eval_info)
        return eval_loss, eval_info

    # Backward-compatible alias for the historical misspelling.
    evalutate = evaluate

    def predict(self, model, test_dataloader):
        """Run inference over ``test_dataloader`` and return the raw results."""
        seed_everything(self.config.model_config.seed)
        model.to(self.config.model_config.device)
        tokenizer = BertTokenizer.from_pretrained(self.config.model_config.tokenizer_path)
        return model.predict(test_dataloader, tokenizer)

    def save_model(self, model):
        """Persist the model according to ``model_config.save_mode``.

        Only the "all" mode is implemented; "embedding" and "model" are
        recognized but raise NotImplementedError.

        Raises:
            ValueError: for an unrecognized save_mode.
            NotImplementedError: for the declared-but-unimplemented modes.
        """
        mode = self.config.model_config.save_mode
        if mode not in ("all", "embedding", "model"):
            # Explicit raise instead of assert: asserts vanish under -O.
            raise ValueError("Unknown save_mode: {}".format(mode))
        if mode != "all":
            raise NotImplementedError
        save_full_name = os.path.join(self.config.model_config.save_path,
                                      self.config.model_config.save_name)
        model.save_model(save_full_name, mode)


if __name__ == '__main__':
    config = Config('bertsoftmax_cluner.yaml')
    trainer = Trainer(config)
    # Datasets must be loaded first: load_dataset() populates
    # num_labels / id2label on the model config, which the model needs.
    dataset, train_loader = trainer.load_dataset('train')
    _, dev_loader = trainer.load_dataset('dev')
    # BUG FIX: train() takes the model as its first argument; the original
    # call passed only the two dataloaders and would fail at runtime.
    model = trainer.load_model()
    trainer.train(model, train_loader, dev_loader)
    # Example: evaluate / predict from a saved checkpoint.
    # _, test_loader = trainer.load_dataset('test')
    # model = trainer.load_model()
    # results = trainer.predict(model, test_loader)
    # print(results)
