def amp_batch_processor_dino(model, data_batch, use_amp=True, **kwargs):
    """Run one forward pass under (optional) AMP autocast and collect losses.

    Args:
        model: callable ``model(image, text)`` returning a dict of scalar
            loss tensors; must contain the key ``'loss'`` (the total loss).
        data_batch: tuple whose first two items are ``(image, text)``;
            any trailing items are ignored.
        use_amp (bool): enable ``torch.cuda.amp.autocast`` for the forward.

    Returns:
        dict with:
            - ``loss``: the total loss tensor (kept on-graph for backward),
            - ``log_vars``: OrderedDict mapping each loss name to a Python
              float (averaged across ranks when distributed),
            - ``num_samples``: batch size, taken as ``len(data_batch[0])``.
    """
    log_vars = OrderedDict()  # scalar values for logging
    num_samples = len(data_batch[0])
    image, text = data_batch[:2]
    with torch.cuda.amp.autocast(enabled=use_amp):
        # output is a dict of loss tensors, e.g.
        # {'dino_loss': ..., 'clip_loss': ..., 'loss': <total>}
        output = model(image, text)

    loss = output['loss']
    log_vars.update(output)
    for name, res in log_vars.items():
        # average each loss across ranks when running distributed
        if is_distributed():
            # .detach() replaces the discouraged .data attribute access
            res = res.detach().clone()
            dist.all_reduce(res.div_(dist.get_world_size()))
        log_vars[name] = res.item()
    return dict(loss=loss, log_vars=log_vars, num_samples=num_samples)


class DINOAMPRunner(CLIPAMPRunner):
    """Epoch-based runner that trains a DINO model with AMP (mixed precision).

    On top of ``CLIPAMPRunner`` it keeps the model's DINO loss schedules in
    sync with the current epoch/iteration (best-effort) and owns the
    ``GradScaler`` consumed by ``AMPDINOOptimizerHook``.
    """

    def __init__(self,
                 model,
                 clip_model=None,
                 train_dataloader=None,
                 val_dataloader=None,
                 train_tech=None,
                 batch_processor=amp_batch_processor_dino,
                 **kwargs):
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        # Avoid a shared mutable default argument (was ``train_tech=dict()``).
        self.train_tech = {} if train_tech is None else train_tech
        self._mox_tasks = []
        # NOTE(review): ``self.use_amp`` is read before ``super().__init__``
        # runs -- presumably a class attribute of CLIPAMPRunner; confirm.
        self.scaler = GradScaler(enabled=self.use_amp)
        self.clip_model = clip_model  # kept only for segmentation testing (testseg)
        batch_processor = partial(batch_processor, use_amp=self.use_amp)
        super().__init__(
            model,
            train_dataloader=train_dataloader,
            val_dataloader=val_dataloader,
            train_tech=self.train_tech,
            batch_processor=batch_processor,
            **kwargs)

    def run(self, **kwargs):
        """Train for ``self._max_epochs`` epochs over ``self.train_dataloader``.

        Raises:
            AssertionError: if ``max_epochs`` was not set at construction.
        """
        assert self._max_epochs is not None, (
            'max_epochs must be specified during instantiation')
        self._max_iters = self._max_epochs * len(self.train_dataloader)
        self.logger.info(f'Start training, distributed training: {self.world_size > 1}, world size={self.world_size}')
        self.logger.info(f'host: {get_host_info()}, work_dir: {self.work_dir}')
        self.call_hook('before_run')

        print("Starting DINO training !")
        for epoch in range(self._max_epochs):
            try:
                # Keep DINO loss schedules (e.g. teacher temperature) on the
                # current epoch; best effort -- model may not expose dino_loss.
                self.model.module.dino_loss.update_epoch(epoch)
            except Exception as exc:
                self.logger.warning(f'failed to update dino_loss epoch: {exc}')
            self.train(self.train_dataloader, **kwargs)

        time.sleep(0)  # wait for some hooks like loggers to finish
        self.call_hook('after_run')

    def test(self, **kwargs):
        """Run the test hooks only (no evaluation loop is implemented here)."""
        self.logger.info("Starting test!")
        self.call_hook("before_run")

    def train(self, data_loader, **kwargs):
        """Run a single training epoch over ``data_loader``.

        Args:
            data_loader: iterable yielding batches for ``run_iter``.
        """
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        self._max_iters = self._max_epochs * len(self.data_loader)
        self.call_hook('before_train_epoch')

        time.sleep(0)  # prevent possible deadlock during epoch transition

        for i, data_batch in enumerate(self.data_loader):
            try:
                # Keep per-iteration schedules in sync; best effort -- the
                # wrapped model may not expose these update methods.
                self.model.module.update_it(i)
                self.model.module.dino_loss.update_iter(self._iter)
            except Exception as exc:
                self.logger.warning(f'failed to update per-iter state: {exc}')

            self._inner_iter = i
            self.call_hook('before_train_iter')
            self.run_iter(data_batch, train_mode=True, **kwargs)
            self.call_hook('after_train_iter')
            self._iter += 1

        self.call_hook('after_train_epoch')
        self._epoch += 1


class AMPDINOOptimizerHook(AMPOptimizerHook):
    """Optimizer hook performing one AMP update step for DINO training.

    Per iteration: detect loss anomalies (NaN/Inf, agreed across ranks),
    scale and backprop the loss, optionally clip the student's gradients,
    cancel last-layer gradients during the freeze period, step the
    scaler/optimizer, and EMA-update the momentum teacher.
    """

    def __init__(self, grad_clip=None, record_opt_state=False):
        super().__init__(grad_clip=grad_clip, record_opt_state=record_opt_state)

    def after_train_iter(self, runner):
        """Run the optimization step for the just-finished training iter.

        Args:
            runner: the training runner; ``runner.outputs`` is the dict
                produced by the batch processor
                (``{'loss': tensor, 'log_vars': ..., 'num_samples': ...}``).

        Raises:
            RuntimeError: when the (all-reduced) loss is NaN or Inf; anomaly
                info is checkpointed on rank 0 before raising.
        """
        loss = runner.outputs['loss']

        # Anomaly detection on a detached copy; all-reduce so that every
        # rank sees the same value and all ranks exit together.
        loss_debug = loss.detach()
        if is_distributed():
            all_reduce(loss_debug)

        if torch.isnan(loss_debug) or torch.isinf(loss_debug):
            runner.logger.error(f'Loss anomaly detected: {loss_debug}. Saving anomaly information.')
            data_batch = [data.cpu() if isinstance(data, torch.Tensor) else data for data in runner.data_batch]
            data_batch = all_gather(data_batch)
            if runner.rank == 0:
                anomaly_info = dict(
                    data_batch=data_batch,
                    optimizer_record=self.opt_record)
                runner.save_checkpoint(runner.work_dir, anomaly_info=anomaly_info)
            synchronize()  # wait for master to save checkpoint
            raise RuntimeError('Exit due to loss anomaly.')

        runner.optimizer.zero_grad()
        runner.scaler.scale(loss).backward()

        if self.grad_clip is not None:
            # Gradients must be unscaled in-place before clipping by norm.
            runner.scaler.unscale_(runner.optimizer)
            grad_norm = self.clip_grads(runner.model.module.student.parameters())
            if grad_norm is not None:
                # Add grad norm to the logger.
                runner.log_buffer.update({'grad_norm': float(grad_norm)},
                                         runner.outputs['num_samples'])
        # DINO trick: zero the last layer's gradients during the first
        # ``freeze_last_layer`` epochs.
        cancel_gradients_last_layer(runner.model.module.dino_loss.epoch,
                                    runner.model.module.student,
                                    runner.model.module.freeze_last_layer)
        runner.scaler.step(runner.optimizer)
        runner.scaler.update()
        # EMA update of the teacher from the student weights.
        runner.model.module._momentum_update_teacher()

        if self.opt_record is not None:
            self.record(runner)
            if self.every_n_iters(runner, 50):
                info = "\n".join(f"{key}:{value}" for key, value in self.opt_record.items())
                runner.logger.info(f'optimizer state:\n {info}')
                self.opt_record = collections.defaultdict(dict)

