import argparse
import itertools
import math
import os
import random
from pathlib import Path
from packaging import version
from safetensors.torch import load_model
import shutil
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from huggingface_hub.utils import insecure_hashlib
from PIL import Image, ImageDraw
from torch.utils.data import Dataset,ConcatDataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from torch import nn
from diffusers import (
    AutoencoderKL,
    DDPMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
# from diffusers.training_utils import EMAModel, compute_dream_and_update_latents, compute_snr
from diffusers.training_utils import EMAModel,  compute_snr
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available


import sys
sys.path.append('/mnt/afs2d/luotianhang/smartvehicle_diffusion/diffusers/examples/inpainting')
from datalist import *
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.13.0.dev0")

logger = get_logger(__name__)


# from datalist_left_obj.datalist_back import *
# from datalist_left_obj.datalist_front import *

def set_seed(seed):
    """Seed all RNGs (Python `random`, NumPy, torch CPU and CUDA) for reproducibility.

    NOTE: this intentionally shadows `accelerate.utils.set_seed` imported at the
    top of the file; callers below add the process rank to `seed` so each GPU
    gets a distinct but deterministic stream.

    Args:
        seed: Base integer seed.
    """
    # `random`, `np` and `torch` are already imported at module level; no need
    # to re-import them locally (the originals here were redundant).
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # Seed every visible GPU so multi-GPU runs are reproducible too.
        torch.cuda.manual_seed_all(seed)


def parse_args():
    """Parse command-line arguments for the inpainting training script.

    Returns:
        argparse.Namespace with all training hyper-parameters. `local_rank` is
        overridden by the `LOCAL_RANK` environment variable when set (torchrun
        distributed launches export it).
    """
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="text-inversion-model",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--seed", type=int, default=1234, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--center_crop",
        default=False,
        action="store_true",
        help=(
            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
            " cropped. The images will be resized to the resolution first before cropping."
        ),
    )
    parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
    parser.add_argument(
        "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument(
        "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
    )
    parser.add_argument("--num_train_epochs", type=int, default=1)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform.  If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-6,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default="no",
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose"
            "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
            "and an Nvidia Ampere GPU."
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
            " checkpoints in case they are better than the last checkpoint and are suitable for resuming training"
            " using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help=(
            "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
            " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
            " for more docs"
        ),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        default=4,
        help=("num workers to convey data"),  # typo fix: was "convy"
    )
    parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
    parser.add_argument(
        "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1."
    )
    parser.add_argument(
        "--dream_training",
        action="store_true",
        help=("Use the DREAM training method, which makes training more efficient and accurate at the expense of doing an extra forward pass. See: https://arxiv.org/abs/2312.00210"),
    )
    # Bug fix: main() reads `args.dream_detail_preservation` when --dream_training
    # is set, but this argument was never defined, causing an AttributeError.
    # Default 1.0 matches the upstream diffusers train_text_to_image.py script.
    parser.add_argument(
        "--dream_detail_preservation",
        type=float,
        default=1.0,
        help="Dream detail preservation factor p (should be greater than 0; default=1.0). Used with --dream_training.",
    )
    parser.add_argument(
        "--snr_gamma",
        type=float,
        default=None,
        help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
        "More details here: https://arxiv.org/abs/2303.09556.",
    )
    args = parser.parse_args()
    # torchrun exports LOCAL_RANK; prefer it over the CLI value when they differ.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    return args




def main() -> None:
    """Fine-tune a 9-channel inpainting UNet (noisy latents + mask + masked-image latents).

    High-level flow:
      1. Parse CLI args and set up an `accelerate` environment (tensorboard logging).
      2. Load tokenizer / text encoder / VAE / UNet from the pretrained pipeline and,
         if needed, widen `unet.conv_in` from 4 to 9 input channels.
      3. Build the training dataset from the weighted `train_datalist` entries
         (dataset-list names come from the wildcard `datalist` import at file top).
      4. Run a standard diffusion training loop with optional noise offset, input
         perturbation and SNR loss re-weighting; periodically save/prune accelerate
         checkpoints and finally export a StableDiffusionPipeline to `output_dir`.
    """
    args = parse_args()
    logging_dir = Path(args.output_dir, args.logging_dir)

    # Keep accelerate state under output_dir and cap retained checkpoints.
    project_config = ProjectConfiguration(
        total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
    )

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with="tensorboard",
        project_config=project_config,
    )
    if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
        raise ValueError(
            "Gradient accumulation is not supported when training the text encoder in distributed training. "
            "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
        )

    if args.seed is not None:
        # Offset the seed by process rank so each GPU draws different data/noise
        # while the overall run stays deterministic.
        rank = accelerator.process_index
        print(rank)
        set_seed(args.seed+rank)


    # Handle the repository creation
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

    # Load the frozen components and the trainable UNet from the pretrained pipeline.
    tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
    text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
    # unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet",ignore_mismatched_sizes=True,low_cpu_mem_usage=False) 
    unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
    
    # Inpainting conditioning: 4 latent channels + 1 mask channel + 4 masked-image
    # latent channels = 9 input channels. If the checkpoint already has 9, keep it.
    in_channels = 9
    if unet.conv_in.in_channels == in_channels:
        pass
    else:
        out_channels = unet.conv_in.out_channels
        unet.register_to_config(in_channels=in_channels)

        with torch.no_grad():
            new_conv_in = nn.Conv2d(
                in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding
            )
            new_conv_in.weight.zero_()
            # Detail: copy the pretrained 4-channel weights into the first 4 input
            # channels; the 5 new (mask / masked-latent) channels start at zero.
            new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight)
            unet.conv_in = new_conv_in

    # Only the UNet is trained; VAE and text encoder stay frozen.
    vae.requires_grad_(False)
    text_encoder.requires_grad_(False)
    if args.enable_xformers_memory_efficient_attention:
        if is_xformers_available():
            import xformers

            xformers_version = version.parse(xformers.__version__)
            if xformers_version == version.parse("0.0.16"):
                logger.warn(
                    "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                )
            unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError("xformers is not available. Make sure it is installed correctly")
    if args.gradient_checkpointing:
        unet.enable_gradient_checkpointing()
        if args.train_text_encoder:
            text_encoder.gradient_checkpointing_enable()

    if args.scale_lr:
        # Linear LR scaling with effective batch size (devices * accumulation * batch).
        args.learning_rate = (
            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
        )

    # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
    if args.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
            )

        optimizer_class = bnb.optim.AdamW8bit
    else:
        optimizer_class = torch.optim.AdamW

    # NOTE(review): only UNet parameters are optimized here; `--train_text_encoder`
    # is accepted by the CLI but the text encoder is frozen above and its params
    # are never added to the optimizer — confirm whether the flag should work.
    optimizer = optimizer_class(
        unet.parameters(),
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        weight_decay=args.adam_weight_decay,
        eps=args.adam_epsilon,
    )

    noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")

    # Deferred project-local import: dataset implementation plus the latent
    # resolution scale factor used for mask resizing below.
    from inpainting_dataset import InpaintingDataset,SCALE_INDEX
    

    def tokenize_captions(captions):
        """Tokenize a batch of caption strings to fixed-length CLIP input ids."""
        inputs = tokenizer(
            captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
        )
        return inputs.input_ids



    # Weighted dataset mix: each entry is (datalist, repeat_weight). Names come
    # from the wildcard `datalist` import at the top of the file. Commented-out
    # entries are deliberately excluded datasets (reasons noted inline).
    train_datalist=[

                # image2image
                # pet
                (data_5_pet2_0,4),# using blip
                (data_5_pet2_1,4),# using blip
                (data_5_pet2_2,4),# using blip
                # (data_5_pet2_3,4),# excluded: generations looked like watercolor paintings
                (data_5_good,2*4),# pet cat dog # using blip
                # (tdata_pet_cat,1),# excluded: pets came out badly deformed
                # (tdata_pet_dog,1),# excluded: pets came out badly deformed
                # safeseat
                (data_ALPS_safeseat_sensebee_2328,60),
                (data_from_trainingdata_people_on_safeseat,60),
                (data_from_trainingdata_only_safeseat,60),
                # (tdata23,4),# repeat 23 
                # (tdata24,5), # designed for the long text and people sit on the seat
                (tdata35,4),
                (tdata41,4),
                # (tdata23,1),# repeat 23 
                # (tdata35,1),
                # (tdata41,1),
                # opensource 
                (tdata_oabench,60), # inpainting
                (tdata_foodseg103,2*2), # inpainting # using blip
                # (data_image2image_visual_genome_object,6),
                (data_image2image_visual_genome_region,10),
                (data_pet_seg_category_mask,1*6),# using blip
                # (data_imagenet_image2image_box,4),
                (data_cub_200_image2image,3),
                # (data_coco2017_image2image_box,1),# using blip # must never be used
                (data_pet_seg_category_face,2),# using blip
                (data_openimagev6_image2image,15),
                # text2image
                (tdata_from_text2image,2), # text2image
                (tdata_from_text2image_jackyhate_2M_512_2M,2), # text2image
                (tdata_from_text2image_jackyhate_2M1024_10k,2), # text2image
                (tdata_from_text2image_CortexLM_midjourney_v6,2), # text2image
                (data_coco2017_text2image,1*2), # text2image high quality
                (data_sam_text2image_a20,1*1),
                (data_sam_text2image_a21,1*1),
                (data_sam_text2image_a22,1),
                (data_sam_text2image_a23,1),
                (data_sam_text2image_a24,1),
                (data_sam_text2image_a25,1),
                (data_sam_text2image_a28,1),
                (data_sam_text2image_a29,1),
                (data_sam_text2image_a131,1),
                (data_sam_text2image_a132,1),
                (data_imagenet_text2image_blip,1),
                (data_wallpaper_text2image,2),
                (data_text2image_flickr30k,1),
                # (data_voc_text2image,2),
                (data_aed20k_text2image,4),# tends to blur once resized to the training resolution
                (data_openimagev6_text2image,15),
                (data_flux_prompt_photo_face_text2image,10),
                (data_fluxdev_controlnet_16k_text2image,10),

                # (data_coco2017_image2image_mask,4),
                # # (data_imagenet_text2image,1),# text2image
                # (data_voc_image2image,1*6),
                # (data_voc_image2image_mask,1*6),

                # # # ###################################### tdata left_obj
                # (tdata_left_obj_back_1,1),
                # (tdata_left_obj_back_2,1), 
                # (tdata_left_obj_back_3,1), 
                # (tdata_left_obj_back_4,1), 
                # (tdata_left_obj_back_7,1), 
                # (tdata_left_obj_back_8,1), 
                # (tdata_left_obj_back_9,1), 
                # (tdata_left_obj_back_10,1),
                # (tdata_left_obj_back_13,1),
                # (tdata_left_obj_back_15,1),
                # (tdata_left_obj_back_16,1),
                # (tdata_left_obj_back_17,1),
                # (tdata_left_obj_back_18,1),
                # (tdata_left_obj_back_19,1),
                # (tdata_left_obj_back_20,1),
                # (tdata_left_obj_back_21,1),
                # (tdata_left_obj_back_26,1),
                # (tdata_left_obj_back_27,1),
                # (tdata_left_obj_back_28,1),
                # (tdata_left_obj_back_29,1),
                # (tdata_left_obj_back_31,1),
                # (tdata_left_obj_back_32,1),
                # (tdata_left_obj_back_33,1),
                # (tdata_left_obj_back_35,1),
                # (tdata_left_obj_back_36,1),
                # (tdata_left_obj_back_37,1),
                # (tdata_left_obj_back_38,1),
                # (tdata_left_obj_back_41,1),
                # (tdata_left_obj_back_42,1),
                # (tdata_left_obj_back_44,1),
                # (tdata_left_obj_back_45,1),
                # (tdata_left_obj_back_46,1),
                # (tdata_left_obj_back_47,1),
                # (tdata_left_obj_back_48,1),
                # (tdata_left_obj_back_49,1),
                # (tdata_left_obj_back_52,1),
                # (tdata_left_obj_back_53,1),
                # (tdata_left_obj_back_54,1),
                # (tdata_left_obj_front_1,1),
                # (tdata_left_obj_front_2,1),
                # (tdata_left_obj_front_3,1),
                # (tdata_left_obj_front_4,1),
                # (tdata_left_obj_front_5,1),
                # (tdata_left_obj_front_6,1),
                # (tdata_left_obj_front_7,1),
                # (tdata_left_obj_front_8,1),
                # (tdata_left_obj_front_9,1),
                # (tdata_left_obj_front_10,1),
                # (tdata_left_obj_front_11,1),
                # (tdata_left_obj_front_12,1),
                # (tdata_left_obj_front_13,1),
                # (tdata_left_obj_front_14,1),
                # (tdata_left_obj_front_15,1),
                # (tdata_left_obj_front_16,1),
                # (tdata_left_obj_front_17,1),
                # (tdata_left_obj_front_18,1),
                # (tdata_left_obj_front_19,1),
                # (tdata_left_obj_front_20,1),
                # (tdata_left_obj_front_21,1),
                # (tdata_left_obj_front_22,1),
                # (tdata_left_obj_front_23,1),
                # (tdata_left_obj_front_24,1),
                # (tdata_left_obj_front_25,1),
                # (tdata_left_obj_front_26,1),
                # (tdata_left_obj_front_27,1),
                # (tdata_left_obj_front_28,1),
                # (tdata_left_obj_front_29,1),
                # (tdata_left_obj_front_30,1),
                # (tdata_left_obj_front_31,1),
                # (tdata_left_obj_front_32,1),
                # (tdata_left_obj_front_33,1),
                # (tdata_left_obj_front_34,1),
                # (tdata_left_obj_front_35,1),
                # (tdata_left_obj_front_36,1),
                # (tdata_left_obj_front_37,1),
                # (tdata_left_obj_front_38,1),
                # (tdata_left_obj_front_39,1),
                # (tdata_left_obj_front_40,1),
                # (tdata_left_obj_front_41,1),
                # (tdata_left_obj_front_42,1),
                # (tdata_left_obj_front_43,1),
                # (tdata_left_obj_front_44,1),
                # (tdata_left_obj_front_45,1),
                # (tdata_left_obj_front_46,1),
                # (tdata_left_obj_front_47,1),
                # (tdata_left_obj_front_48,1),
                # (tdata_left_obj_front_49,1),
                # (tdata_left_obj_front_50,1),
                # (tdata_left_obj_front_52,1),
                # (tdata_left_obj_front_53,1),
                # (tdata_left_obj_front_54,1),
                # (tdata_left_obj_front_55,1),
                # (tdata_left_obj_front_56,1),
                # (tdata_left_obj_front_57,1),
                # (tdata_left_obj_front_60,1),
                # (tdata_left_obj_front_61,1),
                # (tdata_left_obj_front_62,1),
               
                
                ]


    train_dataset = InpaintingDataset(
        image_maps=train_datalist,
        tokenize_captions=tokenize_captions,

    )


    def collate_fn(examples):
        """Collate dataset examples into batched tensors for the dataloader.

        Returns a dict with "input_ids", "pixel_values", "masks", "masked_images".
        """
        input_ids = [example["input_ids"] for example in examples]
        pixel_values = [example["edited_pixel_values"] for example in examples]
        masks = [example["mask"] for example in examples]
        # NOTE(review): "maked_images" looks like a typo for "masked_images" but
        # must match the key emitted by InpaintingDataset — confirm before renaming.
        masked_images = [example["maked_images"] for example in examples]
        
        pixel_values = torch.stack(pixel_values)
        pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()

        masks = torch.stack(masks)
        masked_images = torch.stack(masked_images)
        batch = {
            "input_ids": torch.stack(input_ids), 
            "pixel_values": pixel_values, 
            "masks": masks, 
            "masked_images": masked_images
            }
        return batch

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn,num_workers=args.num_workers
    )

    # Scheduler and math around the number of training steps.
    # overrode_max_train_steps = False
    # NOTE(review): the epoch length is hard-coded to 15000 steps (a float here,
    # since `/` is true division) instead of len(train_dataloader) — confirm this
    # matches the intended virtual-epoch size.
    num_update_steps_per_epoch = 15000 / args.gradient_accumulation_steps
    # if args.max_train_steps is None:
    #     args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    #     overrode_max_train_steps = True

    # NOTE(review): --max_train_steps must be provided on the CLI; the fallback
    # that derived it from num_train_epochs is commented out above, so a None
    # value raises a TypeError on the multiplication below.
    lr_scheduler = get_scheduler(
        args.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps * accelerator.num_processes,
    )

    unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
            unet, optimizer, train_dataloader, lr_scheduler
        )
    accelerator.register_for_checkpointing(lr_scheduler)

    # Frozen modules run in the (possibly reduced) mixed-precision dtype; the
    # trainable UNet stays in fp32 under accelerate's autocast handling.
    weight_dtype = torch.float32
    if args.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif args.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

  
    vae.to(accelerator.device, dtype=weight_dtype)
    text_encoder.to(accelerator.device, dtype=weight_dtype)

    # Same hard-coded 15000-step epoch as above (see NOTE there).
    num_update_steps_per_epoch = 15000 / args.gradient_accumulation_steps
   
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # We need to initialize the trackers we use, and also store our configuration.
    if accelerator.is_main_process:
        accelerator.init_trackers("inpainting", config=vars(args))

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            args.resume_from_checkpoint = None
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(args.resume_from_checkpoint)
            # Recover the step counter from the "checkpoint-<step>" directory name.
            global_step = int(path.split("checkpoint-")[1].split('/')[0])

            first_epoch = int(global_step // num_update_steps_per_epoch)
          
            print('global_step:',global_step)

    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(global_step, args.max_train_steps,1), disable=not accelerator.is_local_main_process)
    progress_bar.set_description(f"{args.output_dir}::Steps")

    for epoch in range(first_epoch, args.num_train_epochs):
        unet.train()
        for step, batch in enumerate(train_dataloader):
            # Skip steps until we reach the resumed step
            # if args.resume_from_checkpoint and step < resume_step:
            #     # if step % args.gradient_accumulation_steps == 0:
            #     #     progress_bar.update(1)
            #     continue

            with accelerator.accumulate(unet):
                # Convert images to latent space

                latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
                latents = latents * vae.config.scaling_factor

                # Convert masked images to latent space
                masked_latents = vae.encode(
                    batch["masked_images"].reshape(batch["pixel_values"].shape).to(dtype=weight_dtype)
                ).latent_dist.sample()
                masked_latents = masked_latents * vae.config.scaling_factor

                masks = batch["masks"]
                # resize the mask to latents shape as we concatenate the mask to the latents
                # NOTE(review): assumes 640x360 * SCALE_INDEX input images and the
                # VAE's 8x spatial downsampling — confirm against InpaintingDataset.
                mask = torch.stack(
                    [
                        torch.nn.functional.interpolate(mask, size=(360*SCALE_INDEX // 8, 640*SCALE_INDEX // 8))
                        for mask in masks
                    ]
                )
                mask = mask.reshape(-1, 1, 360*SCALE_INDEX // 8, 640*SCALE_INDEX // 8)

                # Sample noise that we'll add to the latents
                noise = torch.randn_like(latents)
                if args.noise_offset:
                    # https://www.crosslabs.org//blog/diffusion-with-offset-noise
                    noise += args.noise_offset * torch.randn(
                        (latents.shape[0], latents.shape[1], 1, 1), device=latents.device
                    )
                if args.input_perturbation:
                    new_noise = noise + args.input_perturbation * torch.randn_like(noise)

                bsz = latents.shape[0]
                # Sample a random timestep for each image
                timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
                timesteps = timesteps.long()

                # Add noise to the latents according to the noise magnitude at each timestep
                # (this is the forward diffusion process)
                if args.input_perturbation:
                    noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps)
                else:
                    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                # concatenate the noised latents with the mask and the masked latents
                latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1)

                # Get the text embedding for conditioning
                encoder_hidden_states = text_encoder(batch["input_ids"])[0]

                # NOTE(review): this branch is broken as written — the
                # `compute_dream_and_update_latents` import is commented out at the
                # top of the file, and `target` is read here before it is assigned
                # (it is only set further below). Running with --dream_training
                # will raise a NameError/UnboundLocalError.
                if args.dream_training:
                    noisy_latents, target = compute_dream_and_update_latents(
                        unet,
                        noise_scheduler,
                        timesteps,
                        noise,
                        noisy_latents,
                        target,
                        encoder_hidden_states,
                        args.dream_detail_preservation,
                    )


                # Predict the noise residual
                noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample

                # Get the target for loss depending on the prediction type
                # TODO: weight the loss by the mask so small objects train well;
                # the weight should fade gradually so the background blends in.
                if noise_scheduler.config.prediction_type == "epsilon":
                    target = noise
                elif noise_scheduler.config.prediction_type == "v_prediction":
                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
                else:
                    raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

                
                if args.snr_gamma is None:
                    loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
                else:
                    # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556.
                    # Since we predict the noise instead of x_0, the original formulation is slightly changed.
                    # This is discussed in Section 4.2 of the same paper.
                    snr = compute_snr(noise_scheduler, timesteps)
                    # Per-sample weight: min(SNR, snr_gamma), then normalized per
                    # prediction type (epsilon: /SNR, v-prediction: /(SNR+1)).
                    mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min(
                        dim=1
                    )[0]
                    if noise_scheduler.config.prediction_type == "epsilon":
                        mse_loss_weights = mse_loss_weights / snr
                    elif noise_scheduler.config.prediction_type == "v_prediction":
                        mse_loss_weights = mse_loss_weights / (snr + 1)

                    loss = F.mse_loss(noise_pred.float(), target.float(), reduction="none")
                    loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
                    loss = loss.mean()

                accelerator.backward(loss)
                # NOTE(review): gradient clipping is disabled (commented out), so
                # --max_grad_norm currently has no effect.
                # if accelerator.sync_gradients:
                #     params_to_clip = (
                #         itertools.chain(unet.parameters(), text_encoder.parameters())
                #         if args.train_text_encoder
                #         else unet.parameters()
                #     )
                #     accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1
                print('global_step:',global_step)
                if global_step % args.checkpointing_steps == 0:
                    if accelerator.is_main_process:
                        if args.checkpoints_total_limit is not None:
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))

                            # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
                            if len(checkpoints) >= args.checkpoints_total_limit:
                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
                                removing_checkpoints = checkpoints[0:num_to_remove]

                                logger.info(
                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
                                )
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")

                                for removing_checkpoint in removing_checkpoints:
                                    removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
                                    shutil.rmtree(removing_checkpoint)
                                    
                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                        accelerator.save_state(save_path)
                        logger.info(f"Saved state to {save_path}")
            logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)
            accelerator.log(logs, step=global_step)

            if global_step >= args.max_train_steps:
                break

        accelerator.wait_for_everyone()

    # Create the pipeline using using the trained modules and save it.
    if accelerator.is_main_process:
        # The saved UNet carries the widened 9-channel conv_in in its config.
        pipeline = StableDiffusionPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            unet=accelerator.unwrap_model(unet),
            text_encoder=accelerator.unwrap_model(text_encoder),
        )
        pipeline.save_pretrained(args.output_dir)

        

    accelerator.end_training()


# Script entry point: run training when executed directly (not on import).
if __name__ == "__main__":
    main()