import torch
from tqdm import tqdm

from opensora.registry import SCHEDULERS
import numpy as np
from .rectified_flow import RFlowScheduler, timestep_transform


@SCHEDULERS.register_module("rflow")
class RFLOW:
    """Rectified-flow ("rflow") scheduler registered with the project SCHEDULERS registry.

    Stores sampling hyper-parameters and delegates noise addition / training
    losses to an internal :class:`RFlowScheduler`.  The various ``*_sample``
    methods implement experimental Euler-style ODE samplers driven by a
    velocity-prediction model.
    """

    def __init__(
        self,
        num_sampling_steps=10,
        num_timesteps=1000,
        cfg_scale=1.0,
        use_discrete_timesteps=False,
        use_timestep_transform=False,
        **kwargs,
    ):
        """Store sampler settings and build the underlying RFlowScheduler.

        Args:
            num_sampling_steps: number of integration steps for the samplers
                that derive their schedule from it (some samplers use
                hard-coded schedules instead).
            num_timesteps: diffusion-time scale; timesteps live in
                [0, num_timesteps] and step sizes are normalized by it.
            cfg_scale: default classifier-free-guidance scale used when a
                sample call does not pass ``guidance_scale`` explicitly.
            use_discrete_timesteps: round each timestep to an integer.
            use_timestep_transform: warp each timestep via
                ``timestep_transform`` (uses ``additional_args``).
            **kwargs: forwarded verbatim to :class:`RFlowScheduler`.
        """
        self.num_sampling_steps = num_sampling_steps
        self.num_timesteps = num_timesteps
        self.cfg_scale = cfg_scale
        self.use_discrete_timesteps = use_discrete_timesteps
        self.use_timestep_transform = use_timestep_transform

        self.scheduler = RFlowScheduler(
            num_timesteps=num_timesteps,
            num_sampling_steps=num_sampling_steps,
            use_discrete_timesteps=use_discrete_timesteps,
            use_timestep_transform=use_timestep_transform,
            **kwargs,
        )

    def sample(
        self,
        model,
        text_encoder,
        z,
        prompts,
        device,
        additional_args=None,
        mask=None,
        guidance_scale=None,
        progress=True,
    ):
        # if no specific guidance scale is provided, use the default scale when initializing the scheduler
        if guidance_scale is None:
            guidance_scale = self.cfg_scale

        n = len(prompts)
        # text encoding
        model_args = text_encoder.encode(prompts)
        y_null = text_encoder.null(n)
        model_args["y"] = torch.cat([model_args["y"], y_null], 0)
        if additional_args is not None:
            model_args.update(additional_args)

        # prepare timesteps
        # timesteps = [(1.0 - i / self.num_sampling_steps) * self.num_timesteps for i in range(self.num_sampling_steps)]
        timesteps = [1000.0, 966.6, 933.3, 900.0, 800.0, 500.0, 300.0, 100.0]
        if self.use_discrete_timesteps:
            timesteps = [int(round(t)) for t in timesteps]
        timesteps = [torch.tensor([t] * z.shape[0], device=device) for t in timesteps]
        if self.use_timestep_transform:
            timesteps = [timestep_transform(t, additional_args, num_timesteps=self.num_timesteps) for t in timesteps]
        print(timesteps)
        if mask is not None:
            noise_added = torch.zeros_like(mask, dtype=torch.bool)
            noise_added = noise_added | (mask == 1)

        progress_wrap = tqdm if progress else (lambda x: x)
        for i, t in progress_wrap(enumerate(timesteps)):
            # mask for adding noise
            if mask is not None:
                mask_t = mask * self.num_timesteps
                x0 = z.clone()
                x_noise = self.scheduler.add_noise(x0, torch.randn_like(x0), t)

                mask_t_upper = mask_t >= t.unsqueeze(1)
                model_args["x_mask"] = mask_t_upper.repeat(2, 1)
                mask_add_noise = mask_t_upper & ~noise_added

                z = torch.where(mask_add_noise[:, None, :, None, None], x_noise, x0)
                noise_added = mask_t_upper

            # classifier-free guidance
            z_in = torch.cat([z, z], 0)
            t = torch.cat([t, t], 0)
            pred = model(z_in, t, **model_args).chunk(2, dim=1)[0]
            pred_cond, pred_uncond = pred.chunk(2, dim=0)
            v_pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)

            # update z
            dt = timesteps[i] - timesteps[i + 1] if i < len(timesteps) - 1 else timesteps[i]
            dt = dt / self.num_timesteps
            z = z + v_pred * dt[:, None, None, None, None]

            if mask is not None:
                z = torch.where(mask_t_upper[:, None, :, None, None], z, x0)

        return z
    def map_sample(
        self,
        model,
        text_encoder,
        z,
        prompts,
        device,
        additional_args=None,
        mask=None,
        guidance_scale=None,
        progress=True,
    ):
        # if no specific guidance scale is provided, use the default scale when initializing the scheduler
        if guidance_scale is None:
            guidance_scale = self.cfg_scale

        n = len(prompts)
        # text encoding
        model_args = text_encoder.encode(prompts)
        # y_null = text_encoder.null(n)
        # model_args["y"] = torch.cat([model_args["y"], y_null], 0)
        if additional_args is not None:
            model_args.update(additional_args)

        # prepare timesteps
        timesteps = [(1.0 - i / self.num_sampling_steps) * self.num_timesteps for i in range(self.num_sampling_steps)]
        if self.use_discrete_timesteps:
            timesteps = [int(round(t)) for t in timesteps]
        timesteps = [torch.tensor([t] * z.shape[0], device=device) for t in timesteps]
        if self.use_timestep_transform:
            timesteps = [timestep_transform(t, additional_args, num_timesteps=self.num_timesteps) for t in timesteps]

        if mask is not None:
            noise_added = torch.zeros_like(mask, dtype=torch.bool)
            noise_added = noise_added | (mask == 1)

        progress_wrap = tqdm if progress else (lambda x: x)
        for i, t in progress_wrap(enumerate(timesteps)):
            # mask for adding noise
            if mask is not None:
                mask_t = mask * self.num_timesteps
                x0 = z.clone()
                x_noise = self.scheduler.add_noise(x0, torch.randn_like(x0), t)

                mask_t_upper = mask_t >= t.unsqueeze(1)
                model_args["x_mask"] = mask_t_upper.repeat(1, 1)
                mask_add_noise = mask_t_upper & ~noise_added

                z = torch.where(mask_add_noise[:, None, :, None, None], x_noise, x0)
                noise_added = mask_t_upper

            # classifier-free guidance
            #z_in = torch.cat([z, z], 0)
            z_in = z
            # t = torch.cat([t, t], 0)
            s = timesteps[i + 1] if i < len(timesteps) - 1 else timesteps[i] 
            # s = torch.cat([s, s], 0)
            v_pred = model(z_in, t, s=s, **model_args).chunk(2, dim=1)[0]
            # pred_cond, pred_uncond = pred.chunk(2, dim=0)
            # v_pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)

            # update z
            dt = timesteps[i] - timesteps[i + 1] if i < len(timesteps) - 1 else timesteps[i]
            dt = dt / self.num_timesteps
            z = z + v_pred * dt[:, None, None, None, None]

            if mask is not None:
                z = torch.where(mask_t_upper[:, None, :, None, None], z, x0)

        return z
    def no_cfg_sample(
        self,
        model,
        text_encoder,
        z,
        prompts,
        device,
        additional_args=None,
        mask=None,
        guidance_scale=None,
        progress=True,
    ):
        # if no specific guidance scale is provided, use the default scale when initializing the scheduler
        if guidance_scale is None:
            guidance_scale = self.cfg_scale

        n = len(prompts)
        # text encoding
        model_args = text_encoder.encode(prompts)
        y_null = text_encoder.null(n)
        # model_args["y"] = torch.cat([model_args["y"], y_null], 0)
        if additional_args is not None:
            model_args.update(additional_args)

        # prepare timesteps
        # timesteps = [(1.0 - i / self.num_sampling_steps) * self.num_timesteps for i in range(self.num_sampling_steps)]
        timesteps = [1000.0, 900.0, 800.0, 400.0]
        # A = self.num_timesteps * (0.1 + np.e ** (-0.5 * self.num_sampling_steps))
        # timesteps = [A / (0.1 + np.e ** (-0.5 * i)) for i in range(self.num_sampling_steps, 0, -1)]
        
        if self.use_discrete_timesteps:
            timesteps = [int(round(t)) for t in timesteps]
        timesteps = [torch.tensor([t] * z.shape[0], device=device) for t in timesteps]
        if self.use_timestep_transform:
            timesteps = [timestep_transform(t, additional_args, num_timesteps=self.num_timesteps) for t in timesteps]
        print(timesteps)
        if mask is not None:
            noise_added = torch.zeros_like(mask, dtype=torch.bool)
            noise_added = noise_added | (mask == 1)

        progress_wrap = tqdm if progress else (lambda x: x)
        for i, t in progress_wrap(enumerate(timesteps)):
            # mask for adding noise
            if mask is not None:
                mask_t = mask * self.num_timesteps
                x0 = z.clone()
                x_noise = self.scheduler.add_noise(x0, torch.randn_like(x0), t)

                mask_t_upper = mask_t >= t.unsqueeze(1)
                model_args["x_mask"] = mask_t_upper.repeat(1, 1)
                mask_add_noise = mask_t_upper & ~noise_added

                z = torch.where(mask_add_noise[:, None, :, None, None], x_noise, x0)
                noise_added = mask_t_upper

            # classifier-free guidance
            #z_in = torch.cat([z, z], 0)
            z_in = z
            # t = torch.cat([t, t], 0)
            # s = torch.cat([s, s], 0)
            # if (i > self.num_sampling_steps * 0.66):
            #     model_args["y"] = y_null
            v_pred = model(z_in, t, **model_args).chunk(2, dim=1)[0]
            # pred_cond, pred_uncond = pred.chunk(2, dim=0)
            # v_pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)

            # update z
            dt = timesteps[i] - timesteps[i + 1] if i < len(timesteps) - 1 else timesteps[i]
            dt = dt / self.num_timesteps
            z = z + v_pred * dt[:, None, None, None, None]

            if mask is not None:
                z = torch.where(mask_t_upper[:, None, :, None, None], z, x0)

        return z

    def training_losses(self, model, x_start, model_kwargs=None, noise=None, mask=None, weights=None, t=None):
        """Delegate training-loss computation to the wrapped RFlowScheduler.

        All arguments are forwarded positionally; see
        ``RFlowScheduler.training_losses`` for their semantics.
        """
        return self.scheduler.training_losses(model, x_start, model_kwargs, noise, mask, weights, t)
    
    def scalings_for_boundary_conditions(self, timestep, sigma_data=0.5, timestep_scaling=30.0):
        c_skip = sigma_data**2 / ((timestep * timestep_scaling) ** 2 + sigma_data**2)
        c_out = (timestep * timestep_scaling) / ((timestep * timestep_scaling) ** 2 + sigma_data**2) ** 0.5
        return c_skip, c_out
    
    def lcm_sample(
        self,
        model,
        text_encoder,
        z,
        prompts,
        device,
        additional_args=None,
        mask=None,
        guidance_scale=None,
        progress=True,
    ):
        """Latent-consistency-model (LCM) style sampler without CFG.

        Each step: (1) one Euler extrapolation over the full remaining time
        gives a clean-sample estimate ``z_0``; (2) ``z_0`` is blended with the
        current state via the consistency-model boundary scalings
        (c_skip, c_out); (3) the result is re-noised toward the next timestep
        with fresh Gaussian noise (skipped on the final step).

        Args mirror :meth:`sample`; ``guidance_scale`` is resolved but unused
        because the unconditional branch is disabled here.

        Returns:
            The final latent tensor, same shape as ``z``.
        """
        # if no specific guidance scale is provided, use the default scale when initializing the scheduler
        if guidance_scale is None:
            guidance_scale = self.cfg_scale

        n = len(prompts)
        # text encoding (conditional embedding only; null/uncond branch disabled)
        model_args = text_encoder.encode(prompts)

        if additional_args is not None:
            model_args.update(additional_args)

        # prepare timesteps: uniform schedule down from num_timesteps
        timesteps = [(1.0 - i / self.num_sampling_steps) * self.num_timesteps for i in range(self.num_sampling_steps)]
        if self.use_discrete_timesteps:
            timesteps = [int(round(t)) for t in timesteps]
        timesteps = [torch.tensor([t] * z.shape[0], device=device) for t in timesteps]
        if self.use_timestep_transform:
            timesteps = [timestep_transform(t, additional_args, num_timesteps=self.num_timesteps) for t in timesteps]

        if mask is not None:
            # frames already flagged by the mask never get re-noised
            noise_added = torch.zeros_like(mask, dtype=torch.bool)
            noise_added = noise_added | (mask == 1)

        progress_wrap = tqdm if progress else (lambda x: x)
        for i, t in progress_wrap(enumerate(timesteps)):
            # mask for adding noise
            if mask is not None:
                mask_t = mask * self.num_timesteps
                x0 = z.clone()
                x_noise = self.scheduler.add_noise(x0, torch.randn_like(x0), t)

                mask_t_upper = mask_t >= t.unsqueeze(1)
                model_args["x_mask"] = mask_t_upper.repeat(1, 1)  # single-branch batch (no CFG doubling)
                mask_add_noise = mask_t_upper & ~noise_added

                z = torch.where(mask_add_noise[:, None, :, None, None], x_noise, x0)
                noise_added = mask_t_upper

            # single forward pass; first chunk along dim 1 is the velocity
            v_pred = model(z, t, **model_args).chunk(2, dim=1)[0]
            # boundary scalings so the update approaches identity as t -> 0
            c_skip, c_out = self.scalings_for_boundary_conditions(timesteps[i], sigma_data=0.5, timestep_scaling=30.0)
            # Euler extrapolation all the way to t = 0 (dt is the full remaining time)
            dt = timesteps[i] / self.num_timesteps
            z_0 = z + v_pred * dt[:, None, None, None, None]
            # blend the clean estimate with the current state
            # NOTE(review): original comment read "need fix, just change for a
            # while" -- applying c_out/c_skip to z_0 (not the raw Euler step)
            # is provisional and should be revisited.
            z_0 = z_0 * c_out + z * c_skip


            # re-noise toward the next timestep; the final step returns z_0 directly
            noise = torch.randn_like(z)
            dt_ = (timesteps[i + 1] if i < len(timesteps) - 1 else torch.tensor([0])) / self.num_timesteps
            if i != len(timesteps) - 1:
                z = (1 - dt_) * z_0 + dt_ * noise
            else:
                z = z_0
            if mask is not None:
                # conditioning frames are pinned back to their clean content
                z = torch.where(mask_t_upper[:, None, :, None, None], z, x0)
        return z
    def lcm_change_bound_sample(
        self,
        model,
        text_encoder,
        z,
        prompts,
        device,
        additional_args=None,
        mask=None,
        guidance_scale=None,
        progress=True,
    ):
        # if no specific guidance scale is provided, use the default scale when initializing the scheduler
        if guidance_scale is None:
            guidance_scale = self.cfg_scale

        n = len(prompts)
        # text encoding
        model_args = text_encoder.encode(prompts)
        y_null = text_encoder.null(n)
        model_args["y"] = torch.cat([model_args["y"], y_null], 0)
        
        if additional_args is not None:
            model_args.update(additional_args)

        # prepare timesteps
        timesteps = [(1.0 - i / self.num_sampling_steps) * self.num_timesteps for i in range(self.num_sampling_steps)]
        if self.use_discrete_timesteps:
            timesteps = [int(round(t)) for t in timesteps]
        timesteps = [torch.tensor([t] * z.shape[0], device=device) for t in timesteps]
        if self.use_timestep_transform:
            timesteps = [timestep_transform(t, additional_args, num_timesteps=self.num_timesteps) for t in timesteps]

        if mask is not None:
            noise_added = torch.zeros_like(mask, dtype=torch.bool)
            noise_added = noise_added | (mask == 1)

        progress_wrap = tqdm if progress else (lambda x: x)
        #dt = torch.tensor([0.5]).to(device)
        for i, t in progress_wrap(enumerate(timesteps)):
            # mask for adding noise
            if mask is not None:
                mask_t = mask * self.num_timesteps
                x0 = z.clone()
                x_noise = self.scheduler.add_noise(x0, torch.randn_like(x0), t)

                mask_t_upper = mask_t >= t.unsqueeze(1)
                model_args["x_mask"] = mask_t_upper.repeat(2, 1)
                mask_add_noise = mask_t_upper & ~noise_added

                z = torch.where(mask_add_noise[:, None, :, None, None], x_noise, x0)
                noise_added = mask_t_upper

            # classifier-free guidance
            z_in = torch.cat([z, z], 0)
            t = torch.cat([t, t], 0)
            pred = model(z_in, t, **model_args).chunk(2, dim=1)[0]
            pred_cond, pred_uncond = pred.chunk(2, dim=0)
            v_pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)

            # pred = model(z, t, **model_args).chunk(2, dim=1)[0]
            #print(timesteps[i])
            c_skip, c_out = self.scalings_for_boundary_conditions(timesteps[i], sigma_data=0.5, timestep_scaling=30.0)
            # print(c_skip, c_out)
            # update z
            # dt = timesteps[i] - timesteps[i + 1] if i < len(timesteps) - 1 else timesteps[i]
            # dt = dt / self.num_timesteps
            # print(dt)
            dt = timesteps[i] / self.num_timesteps
            if dt > 0.75:
                dt = dt - 0.75
            elif dt > 0.5:
                dt = dt - 0.5
            elif dt > 0.25:
                dt = dt - 0.25
            #dt = dt * (1 - c_out)
            z_0 = z + v_pred * dt[:, None, None, None, None]
            # z = (z + v_pred* dt)*c_out + z * c_skip
            z_0 = z_0 * c_out + z * c_skip


            noise = torch.randn_like(z)
            # dt_ = (timesteps[i + 1] if i < len(timesteps) - 1 else torch.tensor([0])) / self.num_timesteps 
            dt_ = dt - len(timesteps) / self.num_timesteps if dt - len(timesteps) / self.num_timesteps > 0 else 0 
            if i != len(timesteps) - 1:   
                #z = z_0 - v_pred * dt_[:, None, None, None, None]
                z = (1 - dt_) * z_0 + dt_ * noise
            else:
                z = z_0
            print(dt, dt_, i)
            if mask is not None:
                z = torch.where(mask_t_upper[:, None, :, None, None], z, x0)
        return z
    def statistic_sample(
        self,
        model,
        text_encoder,
        z,
        prompts,
        device,
        additional_args=None,
        mask=None,
        guidance_scale=None,
        progress=True,
    ):
        # if no specific guidance scale is provided, use the default scale when initializing the scheduler
        if guidance_scale is None:
            guidance_scale = self.cfg_scale

        n = len(prompts)
        # text encoding
        model_args = text_encoder.encode(prompts)
        # y_null = text_encoder.null(n)
        # model_args["y"] = torch.cat([model_args["y"], y_null], 0)
        if additional_args is not None:
            model_args.update(additional_args)

        # prepare timesteps
        timesteps = [(1.0 - i / self.num_sampling_steps) * self.num_timesteps for i in range(self.num_sampling_steps)]
        # A = self.num_timesteps * (0.1 + np.e ** (-0.5 * self.num_sampling_steps))
        # timesteps = [A / (0.1 + np.e ** (-0.5 * i)) for i in range(self.num_sampling_steps, 0, -1)]
        print(timesteps)
        if self.use_discrete_timesteps:
            timesteps = [int(round(t)) for t in timesteps]
        timesteps = [torch.tensor([t] * z.shape[0], device=device) for t in timesteps]
        if self.use_timestep_transform:
            timesteps = [timestep_transform(t, additional_args, num_timesteps=self.num_timesteps) for t in timesteps]

        if mask is not None:
            noise_added = torch.zeros_like(mask, dtype=torch.bool)
            noise_added = noise_added | (mask == 1)

        progress_wrap = tqdm if progress else (lambda x: x)
        for i, t in progress_wrap(enumerate(timesteps)):
            # mask for adding noise
            if mask is not None:
                mask_t = mask * self.num_timesteps
                x0 = z.clone()
                x_noise = self.scheduler.add_noise(x0, torch.randn_like(x0), t)

                mask_t_upper = mask_t >= t.unsqueeze(1)
                model_args["x_mask"] = mask_t_upper.repeat(1, 1)
                mask_add_noise = mask_t_upper & ~noise_added

                z = torch.where(mask_add_noise[:, None, :, None, None], x_noise, x0)
                noise_added = mask_t_upper

            # classifier-free guidance
            #z_in = torch.cat([z, z], 0)
            z_in = z
            # t = torch.cat([t, t], 0)
            # s = torch.cat([s, s], 0)
            v_pred = model(z_in, t, **model_args).chunk(2, dim=1)[0]
            # pred_cond, pred_uncond = pred.chunk(2, dim=0)
            # v_pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)

            # update z
            noise = torch.randn_like(z)
            dt = timesteps[i] - timesteps[i + 1] if i < len(timesteps) - 1 else timesteps[i]
            dt = dt / self.num_timesteps
            if i != len(timesteps) - 1:
                dt = dt + 0.03                
                z = z + v_pred * dt[:, None, None, None, None]
                dt = torch.tensor([0.03]).to(z.device)
                z = z + noise * dt[:, None, None, None, None]
            else:
                z = z + v_pred * dt[:, None, None, None, None] 

            if mask is not None:
                z = torch.where(mask_t_upper[:, None, :, None, None], z, x0)

        return z
    # def no_uniform_sample(
    #     self,
    #     model,
    #     text_encoder,
    #     z,
    #     prompts,
    #     device,
    #     additional_args=None,
    #     mask=None,
    #     guidance_scale=None,
    #     progress=True,
    # ):
    #     # if no specific guidance scale is provided, use the default scale when initializing the scheduler
    #     if guidance_scale is None:
    #         guidance_scale = self.cfg_scale

    #     n = len(prompts)
    #     # text encoding
    #     model_args = text_encoder.encode(prompts)
    #     # y_null = text_encoder.null(n)
    #     # model_args["y"] = torch.cat([model_args["y"], y_null], 0)
    #     if additional_args is not None:
    #         model_args.update(additional_args)

    #     # prepare timesteps
    #     # timesteps = [(1.0 - i / self.num_sampling_steps) * self.num_timesteps for i in range(self.num_sampling_steps)]
    #     timesteps = [1000.0, 966.6, 933.3, 900.0, 800.0, 700.0, 600.0, 500.0, 400.0, 300.0, 200.0,]
    #     if self.use_discrete_timesteps:
    #         timesteps = [int(round(t)) for t in timesteps]
    #     timesteps = [torch.tensor([t] * z.shape[0], device=device) for t in timesteps]
    #     if self.use_timestep_transform:
    #         timesteps = [timestep_transform(t, additional_args, num_timesteps=self.num_timesteps) for t in timesteps]

    #     if mask is not None:
    #         noise_added = torch.zeros_like(mask, dtype=torch.bool)
    #         noise_added = noise_added | (mask == 1)

    #     progress_wrap = tqdm if progress else (lambda x: x)
    #     for i, t in progress_wrap(enumerate(timesteps)):
    #         # mask for adding noise
    #         if mask is not None:
    #             mask_t = mask * self.num_timesteps
    #             x0 = z.clone()
    #             x_noise = self.scheduler.add_noise(x0, torch.randn_like(x0), t)

    #             mask_t_upper = mask_t >= t.unsqueeze(1)
    #             model_args["x_mask"] = mask_t_upper.repeat(1, 1)
    #             mask_add_noise = mask_t_upper & ~noise_added

    #             z = torch.where(mask_add_noise[:, None, :, None, None], x_noise, x0)
    #             noise_added = mask_t_upper

    #         # classifier-free guidance
    #         #z_in = torch.cat([z, z], 0)
    #         z_in = z
    #         # t = torch.cat([t, t], 0)
    #         # s = torch.cat([s, s], 0)
    #         v_pred = model(z_in, t, **model_args).chunk(2, dim=1)[0]
    #         # pred_cond, pred_uncond = pred.chunk(2, dim=0)
    #         # v_pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)

    #         # update z
    #         dt = timesteps[i] - timesteps[i + 1] if i < len(timesteps) - 1 else timesteps[i]
    #         dt = dt / self.num_timesteps
    #         z = z + v_pred * dt[:, None, None, None, None]

    #         if mask is not None:
    #             z = torch.where(mask_t_upper[:, None, :, None, None], z, x0)

    #     return z
    def no_uniform_sample(
        self,
        model,
        text_encoder,
        z,
        prompts,
        device,
        additional_args=None,
        mask=None,
        guidance_scale=None,
        progress=True,
    ):
        # if no specific guidance scale is provided, use the default scale when initializing the scheduler
        if guidance_scale is None:
            guidance_scale = self.cfg_scale

        n = len(prompts)
        # text encoding
        model_args = text_encoder.encode(prompts)
        y_null = text_encoder.null(n)
        model_args["y"] = torch.cat([model_args["y"], y_null], 0)
        if additional_args is not None:
            model_args.update(additional_args)

        # prepare timesteps
        # timesteps = [(1.0 - i / self.num_sampling_steps) * self.num_timesteps for i in range(self.num_sampling_steps)]
        timesteps = [1000.0, 966.6, 933.3, 900, 866.6, 833.3, 800.0, 866.6, 833.3, 700.0, 766.6, 733.3, 600.0, 666.6, 633.3, 500.0, 400.0,366.6, 333.3, 300.0,266.6, 233.3, 200.0,166.6, 133.3, 100.0,66.6, 33.3]
        if self.use_discrete_timesteps:
            timesteps = [int(round(t)) for t in timesteps]
        timesteps = [torch.tensor([t] * z.shape[0], device=device) for t in timesteps]
        if self.use_timestep_transform:
            timesteps = [timestep_transform(t, additional_args, num_timesteps=self.num_timesteps) for t in timesteps]

        if mask is not None:
            noise_added = torch.zeros_like(mask, dtype=torch.bool)
            noise_added = noise_added | (mask == 1)

        progress_wrap = tqdm if progress else (lambda x: x)
        for i, t in progress_wrap(enumerate(timesteps)):
            # mask for adding noise
            if mask is not None:
                mask_t = mask * self.num_timesteps
                x0 = z.clone()
                x_noise = self.scheduler.add_noise(x0, torch.randn_like(x0), t)

                mask_t_upper = mask_t >= t.unsqueeze(1)
                model_args["x_mask"] = mask_t_upper.repeat(2, 1)
                mask_add_noise = mask_t_upper & ~noise_added

                z = torch.where(mask_add_noise[:, None, :, None, None], x_noise, x0)
                noise_added = mask_t_upper

            # classifier-free guidance
            z_in = torch.cat([z, z], 0)
            t = torch.cat([t, t], 0)
            pred = model(z_in, t, **model_args).chunk(2, dim=1)[0]
            pred_cond, pred_uncond = pred.chunk(2, dim=0)
            v_pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)

            # update z
            dt = timesteps[i] - timesteps[i + 1] if i < len(timesteps) - 1 else timesteps[i]
            dt = dt / self.num_timesteps
            z = z + v_pred * dt[:, None, None, None, None]

            if mask is not None:
                z = torch.where(mask_t_upper[:, None, :, None, None], z, x0)

        return z
