import torch
from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
from diffusers.utils import export_to_gif, load_image

# Motion adapter that supplies the AnimateDiff temporal layers for an SD 1.5 model.
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)

# SD 1.5 based finetuned checkpoint (alternative: "Lykon/DreamShaper").
model_id = "SG161222/Realistic_Vision_V4.0_noVAE"
pipe = AnimateDiffPipeline.from_pretrained(
    model_id, motion_adapter=adapter, torch_dtype=torch.float16
)

# Reference image, used later for IP-Adapter conditioning in generate_adapter().
image = load_image("tmp/person3.png")

# DDIM sampler with the settings commonly used for AnimateDiff
# (linear betas, trailing timestep spacing, no sample clipping).
pipe.scheduler = DDIMScheduler(
    clip_sample=False,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="linear",
    timestep_spacing="trailing",
    steps_offset=1,
)

# Memory savings: decode the VAE in slices and keep idle submodules on the CPU.
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()

def generate():
    """Run the AnimateDiff pipeline once and export the 16-frame clip as a GIF.

    Uses the module-level ``pipe``; writes the result to ``tmp/animation.gif``.
    """
    sampling_kwargs = dict(
        prompt="masterpiece, bestquality, highlydetailed, ultradetailed, cavalry charge",
        negative_prompt="bad quality, worse quality",
        num_frames=16,
        guidance_scale=7.5,
        num_inference_steps=25,
    )
    result = pipe(**sampling_kwargs)
    # output.frames is a list of clips; take the first (and only) one.
    export_to_gif(result.frames[0], "tmp/animation.gif")

def generate_adapter():
    """Generate three 16-frame clips with different motion-LoRA mixes and
    concatenate them into a single GIF at ``tmp/c11.gif``.

    Uses the module-level ``pipe`` and reference ``image``. Each clip is
    conditioned on the same image via IP-Adapter and a fixed seed, but with a
    different blend of the zoom-out / tilt-up / pan-left motion LoRAs.
    """
    # Load the IP-Adapter so the reference image can condition generation.
    pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
    # Load the motion LoRAs; names are referenced by set_adapters below.
    pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-out", adapter_name="zoom-out")
    pipe.load_lora_weights("guoyww/animatediff-motion-lora-tilt-up", adapter_name="tilt-up")
    pipe.load_lora_weights("guoyww/animatediff-motion-lora-pan-left", adapter_name="pan-left")

    seed = 42
    images = [image] * 3
    prompts = ["best quality, high quality"] * 3
    negative_prompt = "bad quality, worst quality"
    # One weight triple per clip, ordered [zoom-out, tilt-up, pan-left].
    adapter_weights = [[0.75, 0.0, 0.0], [0.0, 0.0, 0.75], [0.0, 0.75, 0.75]]

    output_frames = []
    # BUG FIX: the loop target was named `image`, which made `image` local to
    # this function and raised UnboundLocalError at `images = [image] * 3`
    # above. Renamed to `ip_image` so the module-level reference image is used.
    for prompt, ip_image, weights in zip(prompts, images, adapter_weights):
        pipe.set_adapters(["zoom-out", "tilt-up", "pan-left"], adapter_weights=weights)
        output = pipe(
            prompt=prompt,
            # FIX: negative_prompt was defined but never passed to the pipeline.
            negative_prompt=negative_prompt,
            num_frames=16,
            guidance_scale=7.5,
            num_inference_steps=30,
            ip_adapter_image=ip_image,
            generator=torch.Generator("cuda").manual_seed(seed),
        )
        output_frames.extend(output.frames[0])
    export_to_gif(output_frames, "tmp/c11.gif")

generate()