import os

import torch

from .trainer_utils import TrainerConfig, Trainer
from ..tokenizer.tokenization_bert import BertTokenizer
from ..dataprocess.dataprocess_classification import ClassificationDataProcess
from ..models.modeling_bert import BertConfig, BertForSequenceClassification
from ..utils import file_utils
from ..optimizer.bert_adam import BertAdam


class ClassificationTrainerConfig(TrainerConfig):
    """Configuration for a text-classification training run.

    Captures the model/dataset selection plus tokenization and optimizer
    hyper-parameters; everything else is forwarded to ``TrainerConfig``.
    """

    def __init__(self,
                 train_flag,
                 model_name,
                 dataset_name,
                 vocab_size=21130,
                 num_classes=10,
                 max_len=256,
                 lr=2.5e-5,
                 eps=1.0e-09,
                 do_lower_case=True,
                 dataset_vocab_name='vocab_char.txt',
                 return_type_ids=True,
                 return_mask_ids=True,
                 **kwargs):
        # Model / dataset identity.
        self.model_name = model_name
        self.dataset_name = dataset_name
        self.dataset_vocab_name = dataset_vocab_name
        # Model hyper-parameters.
        self.vocab_size = vocab_size
        self.num_classes = num_classes
        self.max_len = max_len
        # Optimizer hyper-parameters.
        self.lr = lr
        self.eps = eps
        # Tokenization / batch-encoding options.
        self.do_lower_case = do_lower_case
        self.return_type_ids = return_type_ids
        self.return_mask_ids = return_mask_ids
        # Generic training options (epochs, batch size, ...) live in the base class.
        super().__init__(train_flag=train_flag, **kwargs)


class ClassificationTrainer(Trainer):
    """Trainer for BERT-style sequence-classification models.

    Resolves the dataset directory, pretrained checkpoint, vocabulary and
    data pipeline from ``file_utils`` maps, then delegates the actual
    training loop to the ``Trainer`` base class.
    """

    def __init__(self, config: ClassificationTrainerConfig, **kwargs):
        self.config = config

        # Checkpoints that share the BERT architecture and forward signature.
        self.support_bert_models = ['bert_base_chinese',
                                    'ernie',
                                    'roberta_tiny_clue',
                                    'roberta_tiny_pair']

        if config.dataset_name not in file_utils.dataset_dir_map:
            raise ValueError("请检查数据库")  # unknown dataset name

        dataset_info = file_utils.dataset_dir_map[config.dataset_name]
        dataset_dir = dataset_info['dataset_dir']
        n_cls = dataset_info['n_cls']
        if n_cls:
            # Dataset metadata overrides the user-supplied class count.
            self.config.num_classes = n_cls

        vocab_path = None
        if config.model_name in self.support_bert_models:
            model_file_info = file_utils.model_file_info_map[config.model_name]
            pretrained_path = model_file_info['model_dir']
            model = BertForSequenceClassification.from_pretrained(model_dir=pretrained_path,
                                                                  num_labels=self.config.num_classes)
            # Keep the config in sync with the checkpoint's real vocab size.
            self.config.vocab_size = model.config.vocab_size
            vocab_path = model_file_info['vocab_path']
        else:
            raise ValueError("请检查模型")  # unsupported model name

        if vocab_path is None:
            # Fallback: look for a vocab file shipped with the dataset.
            # NOTE(review): currently unreachable — the branch above either
            # sets vocab_path or raises; kept for future non-BERT models.
            if os.path.exists(os.path.join(dataset_dir, config.dataset_vocab_name)):
                vocab_path = os.path.join(dataset_dir, config.dataset_vocab_name)
            else:
                raise ValueError("词表文件未查找到，请检查")

        data_process = ClassificationDataProcess(dataset_dir=dataset_dir,
                                                 vocab_path=vocab_path,
                                                 flag=self.config.model_name,
                                                 do_lower_case=config.do_lower_case,
                                                 max_len=config.max_len,
                                                 return_type_ids=self.config.return_type_ids,
                                                 return_mask_ids=self.config.return_mask_ids,
                                                 )
        self.tokenizer = BertTokenizer(vocab_file=vocab_path,
                                       max_len=self.config.max_len,
                                       do_lower_case=self.config.do_lower_case)
        super().__init__(trainer_config=config,
                         model=model,
                         data_process=data_process,
                         **kwargs)

    def batch_acc(self, output: torch.Tensor, batch: dict, device: str) -> float:
        """Return the fraction of correct predictions for one batch.

        Args:
            output: raw logits from the model, class scores on the last dim.
            batch: batch mapping containing at least ``label_ids``.
            device: device the labels are moved to before comparison.
        """
        labels = batch['label_ids'].to(device)
        output = output.contiguous().view(-1, output.size(-1))
        labels = labels.contiguous().view(-1)
        _, output = output.max(dim=-1)  # argmax over the class dimension
        n_correct = output.eq(labels).sum().item()
        acc = n_correct / len(labels)
        return acc

    def model_optimizer(self, model) -> torch.optim.Optimizer:
        """Build the optimizer (BertAdam) from the config's lr / eps."""
        optimizer = BertAdam(model.parameters(), lr=self.config.lr, eps=self.config.eps)
        return optimizer

    def test(self, model, device: str) -> None:
        """Evaluate on the test split and write loss/accuracy to the log."""
        test_loader = self.data_process.dataloader(mode='test')
        loss, acc = self.valid(model=model, val_loader=test_loader, device=device)
        self.logger.info(f"test loss:{loss: .4f}  acc:{acc:.4f}")

    def model_forward(self, model, batch: dict, device: str) -> tuple:
        """Run one forward step and return ``(loss, logits)``.

        ``type_ids`` / ``mask_ids`` are optional in the batch and passed as
        ``None`` when absent.
        """
        input_ids = batch['input_ids'].to(device)
        type_ids = batch['type_ids'].to(device) if 'type_ids' in batch else None
        mask_ids = batch['mask_ids'].to(device) if 'mask_ids' in batch else None
        label_ids = batch['label_ids'].to(device)

        if self.config.model_name in self.support_bert_models:
            loss, output = model(input_ids=input_ids,
                                 attention_mask=mask_ids,
                                 token_type_ids=type_ids,
                                 labels=label_ids,
                                 return_dict=False)
        else:
            # Unsupported models are rejected in __init__; defensive default.
            loss, output = None, None
        return loss, output


def train_classification(train_flag,
                         model_name,
                         dataset_name,
                         vocab_size=21130,
                         num_classes=10,
                         max_len=256,
                         lr=2.5e-5,
                         do_lower_case=True,
                         dataset_vocab_name='vocab_char.txt',
                         continue_train=True,  # resume training from a previous checkpoint when True
                         n_epochs=50,  # number of training epochs
                         early_stopping_patience=3,  # early-stopping patience (epochs)
                         batch_size=128,  # batch size
                         warmup_steps=1000,
                         gradient_accumulation_steps=1,
                         max_grad_norm=4.0,  # gradient-clipping norm
                         device='cuda',
                         log_n_step=100,  # log every N steps
                         save_model_n_step=-1,  # save a checkpoint every N steps (-1: disabled)
                         only_save_best_model=True,
                         adversarial_train=None,  # adversarial training scheme: FGM or PGD
                         adversarial_embed='word_embeddings',
                         adversarial_epsilon=1.0,
                         eps=1.0e-09,  # optimizer epsilon; appended last for backward compatibility
                         ):
    """Build a classification config + trainer and run training.

    Thin convenience wrapper around ``ClassificationTrainerConfig`` and
    ``ClassificationTrainer``; every argument is forwarded to the config.
    """
    config = ClassificationTrainerConfig(train_flag=train_flag,
                                         model_name=model_name,
                                         dataset_name=dataset_name,
                                         vocab_size=vocab_size,
                                         num_classes=num_classes,
                                         max_len=max_len,
                                         lr=lr,
                                         eps=eps,
                                         do_lower_case=do_lower_case,
                                         dataset_vocab_name=dataset_vocab_name,
                                         continue_train=continue_train,
                                         n_epochs=n_epochs,
                                         early_stopping_patience=early_stopping_patience,
                                         batch_size=batch_size,
                                         warmup_steps=warmup_steps,
                                         gradient_accumulation_steps=gradient_accumulation_steps,
                                         max_grad_norm=max_grad_norm,
                                         device=device,
                                         log_n_step=log_n_step,
                                         save_model_n_step=save_model_n_step,
                                         only_save_best_model=only_save_best_model,
                                         adversarial_train=adversarial_train,
                                         adversarial_embed=adversarial_embed,
                                         adversarial_epsilon=adversarial_epsilon,
                                         )
    trainer = ClassificationTrainer(config=config)
    trainer.train()











