import argparse
import os
import torch
from PIL import Image
from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict

from controlnet_model import ControlNetModel
from unet_controlnet import UNetSpatioTemporalConditionControlNetModel
from pipeline_controlnet_lora import StableVideoDiffusionControlnetPipeline
from diffusers.utils import load_image, export_to_gif
from pprint import pprint


def load_images_from_folder_to_pil(canny_folder_path,
                                   frame_id,
                                   video_name,
                                   target_size=(512, 320)):
    """Load up to 14 consecutive control frames for one video as PIL images.

    Args:
        canny_folder_path: root folder containing one sub-folder per video.
        frame_id: raw source-frame id; mapped to a control-frame index by
            dividing by 3 (one control frame per 3 source frames —
            TODO confirm this stride against how the frames were extracted).
        video_name: sub-folder name of the video to read from.
        target_size: (width, height) every image is resized to.

    Returns:
        List of at most 14 resized PIL images, in filename (ASCII) order.
    """
    video_dir = str(os.path.join(canny_folder_path, video_name))
    # Files are assumed to be named so that lexicographic order == frame order.
    frame_files = sorted(os.listdir(video_dir))

    start = int(int(frame_id) / 3)
    return [
        load_image(os.path.join(video_dir, fname)).resize(target_size)
        for fname in frame_files[start:start + 14]
    ]

def load_lora_for_unet_spatiotemporal(args, unet):
    """Inject LoRA adapters into ``unet`` and load the trained LoRA weights.

    Args:
        args: parsed CLI namespace; uses ``rank``, ``lora_alpha``,
            ``lora_file_path``, ``use_safetensors`` and ``device``.
        unet: the spatio-temporal UNet to receive the adapters.

    Returns:
        The UNet with LoRA adapters injected. Weights are only loaded when
        ``args.lora_file_path`` points to an existing file; otherwise the
        adapters keep their fresh initialization and a warning is printed.
    """
    print(f"Loading lora model from {args.lora_file_path}...")

    lora_config = LoraConfig(
        r=args.rank,
        lora_alpha=args.lora_alpha,
        # Attention projections only — must match how the adapters were trained.
        target_modules=["to_q", "to_k", "to_v", "to_out.0"],
        bias="none",
        inference_mode=True,
    )

    print("Injecting LoRA adapters into the model...")
    lora_unet = inject_adapter_in_model(lora_config, unet)

    lora_file_path = args.lora_file_path
    if lora_file_path is None or not os.path.exists(lora_file_path):
        # Previously this case returned silently; make the degraded state
        # (randomly initialized adapters, no trained weights) visible.
        print(f"WARNING: LoRA weights not found at {lora_file_path}; "
              "returning model with freshly initialized adapters.")
        return lora_unet

    print(f"Loading LoRA weights from {lora_file_path}...")
    if args.use_safetensors and lora_file_path.endswith(".safetensors"):
        from safetensors.torch import load_file
        lora_state_dict = load_file(lora_file_path)
    else:
        lora_state_dict = torch.load(lora_file_path, map_location=args.device)

    set_peft_model_state_dict(lora_unet, lora_state_dict, adapter_name="default")

    return lora_unet

def parse_args():
    """Build and parse the CLI arguments for SVD ControlNet LoRA inference.

    Returns:
        argparse.Namespace with all options defined below.
    """
    parser = argparse.ArgumentParser(
        description="Stable Video Diffusion Control Net Lora."
    )

    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default="stabilityai/stable-video-diffusion-img2vid-xl",
        help="huggingface model path, local or web. If time timeout, you need download by yourself"
    )

    parser.add_argument(
        "--controlnet_model_path",
        type=str,
        default="chengwendong/controlnet_canny_stable_video_diffusion_xl",
        help="the trained model have upload to huggingface website. If time timeout, you need download by yourself"
    )

    parser.add_argument(
        "--lora_file_path",
        type=str,
        default="weights/pytorch_lora_weights.safetensors",
        help="lora local file path, the trained model have upload to huggingface website, you need download from huggingface (chengwendong/controlnet_canny_stable_video_diffusion_xl) by yourself "
    )

    parser.add_argument(
        "--out_file_path",
        type=str,
        default="test.gif",
        help="output video file"
    )

    parser.add_argument(
        "--validation_image_path",
        type=str,
        default="test.jpg",
        help="Input the first frame img",
    )

    parser.add_argument(
        "--validation_control_folder_path",
        type=str,
        default="test_canny_img/",
        help="validation_control_folder_path. you need to ensure that the folder only exists 14 canny image ordered by file_name ascii",
    )

    parser.add_argument(
        "--num_inference_steps",
        type=int,
        default=30,
        help="unet denosing steps, Number of reasoning steps (the higher the value, the better the quality, but it will take more time) 25 - 50"
    )

    parser.add_argument(
        "--min_guidance_scale",
        type=float,
        default=1.0,
        help=("The guiding intensity of the initial frame or the low dynamic part of the video. "
              "The smaller the value, the more free the early generation will be."),
    )

    parser.add_argument(
        "--max_guidance_scale",
        type=float,
        default=5.0,
        help=("The guiding strength of the key frames or the high dynamic parts of the video, the higher the value, "
              "the more closely the key content aligns with the text prompt."),
    )

    parser.add_argument(
        "--fps",
        type=int,
        default=8,
        help="Frame rate of the exported GIF.",
    )

    parser.add_argument(
        "--motion_bucket_id",
        type=int,
        default=60,
        help=("Control the amplitude of movement and dynamic effects in the generated video."),
    )

    parser.add_argument(
        "--noise_aug_strength",
        type=float,
        default=0.02,
        help=("Control the amount of noise added to the input image, "
              "thereby affecting the creativity of the generated result and the fidelity to the original image."),
    )

    parser.add_argument(
        "--decode_chunk_size",
        type=int,
        default=8,
        help=("The number of frames for parallel decoding (affecting VRAM usage)."),
    )

    # NOTE: this option was previously registered twice, which makes argparse
    # raise "conflicting option string" at parser construction time. Register
    # it exactly once.
    parser.add_argument(
        "--conditioning_scale",
        type=float,
        default=1.0,
        help=("The extent to which the conditions of the edge map affect the generation result, [0.0, 1.0]."),
    )

    parser.add_argument(
        "--rank",
        type=int,
        default=16,
        help="lora rank",
    )

    parser.add_argument(
        "--lora_alpha",
        type=int,
        default=16,
        help=("LoRA scaling factor alpha; the effective adapter scale is "
              "lora_alpha / rank."),
    )

    # `type=bool` is a classic argparse footgun: bool("False") is True, so the
    # flag could never be disabled from the command line. Parse the string
    # explicitly instead (accepts e.g. "false"/"0"/"no" to disable).
    parser.add_argument(
        "--use_safetensors",
        type=lambda s: s.strip().lower() not in ("0", "false", "no"),
        default=True,
        help="Whether to load LoRA weights with safetensors (true/false).",
    )

    parser.add_argument(
        "--device",
        type=str,
        default="cuda:0",
        help="torch device to run inference on, e.g. cuda:0 or cpu.",
    )

    # The original `return args` referenced a name that was never assigned
    # inside this function (NameError at the call site).
    return parser.parse_args()


def main(args):
    """Run SVD ControlNet LoRA inference and export the result as a GIF.

    Args:
        args: parsed CLI namespace from ``parse_args``.
    """
    # First-frame conditioning image; (512, 320) matches width/height passed
    # to the pipeline below.
    validation_image = load_image(args.validation_image_path).resize((512, 320))

    # Per the CLI help, the control folder contains the 14 canny frames
    # directly, so treat it as the "video" directory (empty video_name) and
    # start at frame 0. The original call passed the size tuple as frame_id
    # and omitted video_name entirely (TypeError).
    validation_control_images = load_images_from_folder_to_pil(
        args.validation_control_folder_path,
        frame_id=0,
        video_name="",
        target_size=(512, 320),
    )

    # Ensure the output directory exists. The original used the nonexistent
    # attribute `args.out_path`; the real option is `out_file_path`, which is
    # a file path and may have no directory component at all.
    out_dir = os.path.dirname(args.out_file_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # NOTE(review): for reproducible results, seed here, e.g.
    # torch.manual_seed(seed) and pass generator=torch.Generator(args.device)
    # .manual_seed(seed) into the pipeline call.

    # Load and set up the pipeline.
    print(f"Loading base controlnet model from {args.controlnet_model_path}...")
    controlnet = ControlNetModel.from_pretrained(args.controlnet_model_path,
                                                 low_cpu_mem_usage=False,
                                                 subfolder="controlnet")

    print(f"Loading base UNet model from {args.pretrained_model_name_or_path}...")
    unet = UNetSpatioTemporalConditionControlNetModel.from_pretrained(args.pretrained_model_name_or_path,
                                                                      subfolder="unet",
                                                                      low_cpu_mem_usage=False,
                                                                      variant="fp16",
                                                                      )

    lora_unet = load_lora_for_unet_spatiotemporal(args, unet)

    pipeline = StableVideoDiffusionControlnetPipeline.from_pretrained(args.pretrained_model_name_or_path,
                                                                      controlnet=controlnet,
                                                                      low_cpu_mem_usage=False,
                                                                      unet=lora_unet,
                                                                      variant="fp16").to(args.device)
    # Offload sub-models to CPU between forward passes to reduce VRAM usage.
    # Do not call pipeline.to(device) again afterwards: moving the pipeline
    # after enabling offload conflicts with the offload hooks.
    pipeline.enable_model_cpu_offload()

    with torch.inference_mode():
        video_frames = pipeline(validation_image,
                                validation_control_images[:14],
                                num_inference_steps=args.num_inference_steps,
                                min_guidance_scale=args.min_guidance_scale,
                                max_guidance_scale=args.max_guidance_scale,
                                fps=args.fps,
                                noise_aug_strength=args.noise_aug_strength,
                                conditioning_scale=args.conditioning_scale,
                                decode_chunk_size=args.decode_chunk_size,
                                num_frames=14,
                                motion_bucket_id=args.motion_bucket_id,
                                width=512,
                                height=320).frames[0]

    export_to_gif(video_frames, args.out_file_path, args.fps)


# Main script
# Script entry point: parse CLI options, echo them for the log, run inference.
if __name__ == "__main__":
    cli_args = parse_args()
    pprint(vars(cli_args), indent=2, width=100)
    main(cli_args)





