from typing import Dict, Union, Any, Optional, Callable, List, Tuple

from datasets import Dataset
from transformers import Trainer, is_apex_available, PreTrainedModel, TrainingArguments, DataCollator, \
    PreTrainedTokenizerBase, EvalPrediction, TrainerCallback
import torch.nn as nn
import torch
# from transformers.trainer_pt_utils import smp_forward_backward
from transformers.utils import is_sagemaker_mp_enabled
from .tricks import FGM, PGD

if is_sagemaker_mp_enabled():
    import smdistributed.modelparallel.torch as smp


    # Local re-implementation of the transformers helper of the same name
    # (cf. the commented-out `transformers.trainer_pt_utils` import above):
    # runs one forward + backward pass inside an `smp.step`-traced region so
    # SageMaker Model Parallel can partition and pipeline the computation.
    @smp.step()
    def smp_forward_backward(model, inputs, gradient_accumulation_steps=1):
        outputs = model(**inputs)
        # Handles both dict-like ModelOutput objects and plain tuples.
        loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
        # Pre-divide so gradients accumulated over the window average correctly.
        loss /= gradient_accumulation_steps
        model.backward(loss)
        return loss
if is_apex_available():
    from apex import amp


class _Trainer(Trainer):
    """`transformers.Trainer` extended with adversarial-training tricks.

    When ``args.attack`` is ``'fgm'`` or ``'pgd'`` (a project-specific field
    added to ``TrainingArguments``), every training step performs an extra
    adversarial forward/backward pass using the ``FGM`` / ``PGD`` helpers from
    ``.tricks``. Without that field (or with any other value) this behaves
    like a stock ``Trainer``.
    """

    def __init__(
            self,
            model: Union[PreTrainedModel, nn.Module] = None,
            args: TrainingArguments = None,
            data_collator: Optional[DataCollator] = None,
            train_dataset: Optional[Dataset] = None,
            eval_dataset: Optional[Dataset] = None,
            tokenizer: Optional[PreTrainedTokenizerBase] = None,
            model_init: Callable[[], PreTrainedModel] = None,
            compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
            callbacks: Optional[List[TrainerCallback]] = None,
            optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
            preprocess_logits_for_metrics: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
    ):
        super().__init__(
            model, args, data_collator, train_dataset, eval_dataset, tokenizer, model_init, compute_metrics, callbacks,
            optimizers, preprocess_logits_for_metrics
        )
        self.fgm, self.pgd = None, None
        # `attack` is a project-specific extension of TrainingArguments; use
        # getattr so a stock TrainingArguments (no `attack` field) still works
        # instead of raising AttributeError.
        attack = getattr(args, 'attack', None)
        if attack == 'fgm':
            self.fgm = FGM(model=model)
            print('正在使用FGM...')
        elif attack == 'pgd':
            self.pgd = PGD(model=model)
            self.pgd_k = 3  # number of inner PGD attack iterations
            print('正在使用PGD...')

    def _backward(self, loss: torch.Tensor) -> torch.Tensor:
        """Scale `loss` and backpropagate through whichever engine is active.

        Mirrors the stock Trainer logic: divide by
        ``gradient_accumulation_steps`` (deepspeed does this itself inside its
        own ``backward``), then dispatch to the AMP grad scaler, apex,
        deepspeed, or plain autograd.

        Args:
            loss (`torch.Tensor`): scalar training loss.

        Return:
            `torch.Tensor`: the (possibly rescaled) loss tensor.
        """
        if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
            # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
            loss = loss / self.args.gradient_accumulation_steps

        if self.do_grad_scaling:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            # loss gets scaled under gradient_accumulation_steps in deepspeed
            loss = self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (`nn.Module`):
                The model to train.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.

        Return:
            `torch.Tensor`: The tensor with training loss on this batch.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if is_sagemaker_mp_enabled():
            # SageMaker MP does forward+backward in one traced call; the
            # adversarial tricks below are not applied on this path.
            loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
            return loss_mb.reduce_mean().detach().to(self.args.device)

        with self.compute_loss_context_manager():
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training

        loss = self._backward(loss)

        # Adversarial training: run a second forward/backward pass on perturbed
        # embeddings. The adversarial loss is routed through `_backward` so its
        # gradients receive the same scaling (AMP scaler / apex / deepspeed /
        # gradient accumulation) as the clean loss; a plain `.backward()` here
        # would mix scaled and unscaled gradients under fp16.
        if self.fgm is not None:
            self.fgm.attack()  # perturb embedding weights in-place

            loss_adv = model(**inputs).loss
            loss_adv = loss_adv.mean()

            self._backward(loss_adv)
            self.fgm.restore()  # undo the perturbation
        elif self.pgd is not None:
            self.pgd.backup_grad()
            for _t in range(self.pgd_k):
                self.pgd.attack(is_first_attack=(_t == 0))
                if _t != self.pgd_k - 1:
                    # Intermediate steps only refine the perturbation; drop
                    # their gradients so they don't pollute the update.
                    model.zero_grad()
                else:
                    # Final step: restore the backed-up clean gradients and
                    # let the adversarial gradients accumulate on top.
                    self.pgd.restore_grad()
                loss_adv = model(**inputs).loss
                loss_adv = loss_adv.mean()
                self._backward(loss_adv)
            self.pgd.restore()

        return loss.detach()
