import argparse
import warnings
warnings.filterwarnings('ignore')
import copy
import logging
import math
import os
import random
import shutil
from contextlib import nullcontext
from pathlib import Path

import accelerate
import numpy as np
import torch
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import DistributedType, ProjectConfiguration, set_seed

from datasets import load_dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from peft import LoraConfig, set_peft_model_state_dict
from peft.utils import get_peft_model_state_dict
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm

import diffusers
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxFillPipeline, FluxTransformer2DModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import (
    cast_training_params,
    compute_density_for_timestep_sampling,
    compute_loss_weighting_for_sd3,
    free_memory,
)
from diffusers.utils import check_min_version, is_wandb_available, load_image, make_image_grid
from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
from diffusers.utils.torch_utils import is_compiled_module
from diffusers.image_processor import VaeImageProcessor
from accelerate.utils import DistributedDataParallelKwargs

import pdb

from train_flux_parse import parse_args
from util_flux import pad_image
# Target spatial size that input images are padded/resized to.
# NOTE(review): whether this is (H, W) or (W, H) depends on `pad_image`
# in util_flux — confirm against that helper.
target_shape = (1024, 1024)


# Name prefixes of the QK normalization layers inside the Flux
# transformer blocks. Matched by prefix elsewhere in the training
# script (usage not visible in this chunk).
NORM_LAYER_PREFIXES = ["norm_q", "norm_k", "norm_added_q", "norm_added_k"]


# BUGFIX: `args` was used below but never assigned — `parse_args` (imported
# from train_flux_parse) was never called, so the script raised NameError
# on the first `args.` access. Parse the CLI configuration before use.
args = parse_args()

# Load the pretrained Flux transformer and freeze all of its parameters;
# only layers added/replaced afterwards are meant to train.
flux_transformer = FluxTransformer2DModel.from_pretrained(
    args.pretrained_model_name_or_path,
    subfolder="transformer",
    revision=args.revision,
    variant=args.variant,
)
flux_transformer.requires_grad_(False)

# Enable extra image inputs: widen the patch-embedding projection so the
# transformer accepts 3x the original packed-latent channels (presumably
# noisy latents + conditioning image latents + mask — confirm against the
# training loop), while the output width stays unchanged.
with torch.no_grad():
    initial_input_channels = flux_transformer.config.in_channels
    initial_output_channels = flux_transformer.config.out_channels
    new_linear = torch.nn.Linear(
        flux_transformer.x_embedder.in_features * 3,
        flux_transformer.x_embedder.out_features,
        bias=flux_transformer.x_embedder.bias is not None,
        dtype=flux_transformer.dtype,
        device=flux_transformer.device,
    )
    # NOTE(review): the widened projection is left randomly initialized on
    # purpose — the usual zero-init + copy of the pretrained x_embedder
    # weights into the first `in_features` columns is skipped here.
    # Presumably the x_embedder weights are loaded from a checkpoint later
    # ("load x_embedder"); confirm before training from scratch, otherwise
    # the pretrained patch embedding is destroyed.
    flux_transformer.x_embedder = new_linear
    # Keep the model config consistent with the widened input projection
    # so save/load round-trips reconstruct the correct layer shapes.
    flux_transformer.register_to_config(
        in_channels=initial_input_channels * 3,
        out_channels=initial_output_channels,
    )

