# ppgddpm的训练循环类
# 在此类中需完成对当前批次的每一张图像，执行前向传播计算损失、反向传播根据损失调整模型

from utils.train_utils.ddpm_train_util import *


class PPGDDPMTrainLoop(DDPMTrainLoop):
    """Training loop for the PPG-DDPM model.

    Specializes ``DDPMTrainLoop`` by splitting each dataloader batch into
    the image tensor and its PSF conditioning data, then running the
    standard microbatched forward/backward pass: per microbatch it samples
    diffusion timesteps, computes the training losses, and accumulates
    gradients via the mixed-precision trainer.
    """

    def __init__(self, *args, **kwargs):
        # No extra state here; defer entirely to the base training loop.
        super().__init__(*args, **kwargs)

    def batch_process(self, batch):
        """Split one dataloader batch into (images, conditioning).

        NOTE(review): ``forward_backward`` iterates ``cond.items()``, so
        the second element is expected to be a dict of tensors — confirm
        the dataloader yields the PSF data in that form.
        """
        images, psf = batch
        return images, psf

    def forward_backward(self, batch):
        """Run one forward/backward pass over ``batch`` in microbatches.

        For every microbatch: move data and conditioning to the device,
        sample timesteps and importance weights, evaluate the diffusion
        training losses, log them, and backpropagate the weighted mean.
        Under DDP, gradient all-reduce is deferred to the final
        microbatch via ``no_sync()``.
        """
        data, cond = self.batch_process(batch)
        self.mp_trainer.zero_grad()
        total = data.shape[0]
        for start in range(0, total, self.microbatch):
            stop = start + self.microbatch
            # Slice out the current microbatch and move it to the device.
            micro = data[start:stop].to(dist_util.dev())
            micro_cond = {
                key: val[start:stop].to(dist_util.dev())
                for key, val in cond.items()
            }
            is_last = stop >= total

            # Sample timesteps and their importance weights.
            t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())

            # Deferred loss computation: calling loss_fn() invokes
            # self.diffusion.training_losses(ddp_model, micro, t,
            # model_kwargs=micro_cond).
            loss_fn = functools.partial(
                self.diffusion.training_losses,
                self.ddp_model,
                micro,
                t,
                model_kwargs=micro_cond,
            )

            # Skip gradient sync on every DDP microbatch except the last,
            # so gradients are all-reduced only once per outer batch.
            if self.use_ddp and not is_last:
                with self.ddp_model.no_sync():
                    losses = loss_fn()
            else:
                losses = loss_fn()

            # Let a loss-aware sampler refresh its per-timestep statistics
            # from the unweighted, detached per-sample losses.
            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(
                    t, losses["loss"].detach()
                )

            # Importance-weighted mean loss; log every weighted loss term.
            loss = (losses["loss"] * weights).mean()
            log_loss_dict(
                self.diffusion, t, {k: v * weights for k, v in losses.items()}
            )
            self.mp_trainer.backward(loss)
