# from diffusers import DiffusionPipeline
from diffusers import I2VGenXLPipeline
import moviepy.editor as mp
import torch
from diffusers.utils import export_to_gif
from PIL import Image
import gc

class I2VGenXLModel:
  """Wrapper around the ali-vilab/i2vgen-xl image-to-video diffusion pipeline.

  Loads the fp16 pipeline once in the constructor and exposes generate() to
  turn a (prompt, image) pair into a video file on disk.
  """

  def __init__(self):
    """Load the fp16 I2VGen-XL pipeline from the local model cache.

    Uses local_files_only=True, so the weights must already be downloaded.
    """
    model_id = "ali-vilab/i2vgen-xl"
    pipeline = I2VGenXLPipeline.from_pretrained(model_id, local_files_only=True, torch_dtype=torch.float16, variant="fp16").to("cuda")
    # Offload idle submodules to CPU between forward passes to cut peak VRAM.
    pipeline.enable_model_cpu_offload()
    print("load pipeline done..")
    self.model_id = model_id
    self.pipeline = pipeline

  def generate(self, prompt: str, image: Image.Image, out_file: str, seed: int = 8888):
    """Generate a video from a text prompt and a conditioning image.

    Args:
      prompt: Text description guiding the generation.
      image: Conditioning image (PIL Image) for the video.
      out_file: Output video path. An intermediate "<out_file>.gif" is also
        written as a by-product of export_to_gif.
      seed: RNG seed for reproducible sampling (default preserves the
        previously hard-coded value 8888).

    Returns:
      The path of the written video file (same value as ``out_file``).
    """
    print("begin generate..")
    generator = torch.manual_seed(seed)
    frames = self.pipeline(
      prompt=prompt,
      image=image,
      generator=generator
    ).frames[0]
    print("generated..")

    filename = f"{out_file}.gif"
    print("filename:", filename)
    gif_file = export_to_gif(frames, filename)
    print("gif_file:", gif_file)

    # Convert the GIF into a video container. Close the clip afterwards to
    # release the underlying ffmpeg reader process (previously leaked).
    clip = mp.VideoFileClip(gif_file)
    try:
      clip.write_videofile(out_file)
    finally:
      clip.close()

    memory = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024
    print(f"Memory: {memory}GB")
    return out_file

  def close(self):
    """Drop the pipeline and release cached CUDA memory."""
    del self.pipeline
    gc.collect()
    torch.cuda.empty_cache()

# Lazily-created module-level singleton: populated on the first image2video()
# call and released by clear(). String annotation keeps the Optional type
# accurate on all Python versions.
__Image2VideoModelInstance__: "I2VGenXLModel | None" = None

def image2video(prompt:str, image: Image, out_file: str):
  """Generate a video for *prompt* conditioned on *image*, writing to *out_file*.

  The heavyweight I2VGenXLModel is constructed lazily on the first call and
  cached in a module-level singleton so the pipeline loads only once.
  """
  global __Image2VideoModelInstance__
  model = __Image2VideoModelInstance__
  if model is None:
    print("create video module instance..")
    model = I2VGenXLModel()
    __Image2VideoModelInstance__ = model
  return model.generate(prompt, image, out_file)

def clear():
  """Release the cached model instance and reset the singleton.

  Bug fix: previously the module-level reference was left pointing at a
  closed model (whose pipeline had been deleted), so a subsequent
  image2video() call would reuse an unusable instance instead of rebuilding
  one. Resetting to None makes the next call reload cleanly.
  """
  global __Image2VideoModelInstance__
  if __Image2VideoModelInstance__ is not None:
    __Image2VideoModelInstance__.close()
    __Image2VideoModelInstance__ = None