#!/usr/bin/env python
# coding=utf-8
import sys
sys.path.append('.')
from opensora.registry import DATASETS, MODELS, SCHEDULERS, build_module

from opensora.acceleration.parallel_states import (
        get_data_parallel_group,
        set_data_parallel_group,
        set_sequence_parallel_group,

    )
# from opensora.datasets import prepare_dataloader, prepare_variable_dataloader
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader, Dataset
from opensora.models.text_encoder.t5 import text_preprocessing
from peft import LoraConfig, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict, PeftModel
from opensora.datasets import IMG_FPS, save_sample
import os
# cfg = dict(
#     type="iddpm",
#     timestep_respacing=""
# 
# )
# Generation settings.
# NOTE(review): the values suggest `width` and `height` are swapped relative to
# their names (720x1280). Downstream code assigns them crosswise into
# model_args (height <- width, width <- height), so the effective frame is
# 1280 wide x 720 tall — confirm this is intended before renaming anything.
width = 720
height = 1280
num_frames = 96  # number of video frames to generate per sample
fps=24  # frame rate passed to save_sample when writing the video
samplestep = 30  # number of sampling steps for the rflow scheduler
setting = "720_1280_opensora_32"  # subdirectory tag under output_dir
#output_dir = "/mount/ccai_nas2/yubo/lcm/validate_data/video"
output_dir = "./data/opensora30_nomask"
# STDiT3 diffusion-transformer backbone configuration (consumed by build_module).
model_config = {
    "type": "STDiT3-XL/2",
    # local checkpoint of the pretrained OpenSora STDiT v3 weights
    "from_pretrained": "/run/determined/workdir2/OpenSora-STDiT-v3",
    "qk_norm": True,
    "enable_flash_attn": True,
    "enable_layernorm_kernel": True,
}

# Video VAE configuration: encodes pixels to latents and decodes samples back.
vae_config = {
    "type": "OpenSoraVAE_V1_2",
    "from_pretrained": "/run/determined/workdir2/OpenSora-VAE-v1.2/",
    "micro_frame_size": 17,
    "micro_batch_size": 4,
}
# Instantiate the VAE and derive the latent tensor size used to shape the
# initial noise `z` for the sampler.
vae = build_module(vae_config, MODELS)
# NOTE(review): get_latent_size presumably expects (T, H, W); given the swapped
# naming of `width`/`height` above, this passes (96, 720, 1280), which matches
# the crosswise model_args assignment below — confirm the intended order.
latent_size = vae.get_latent_size((num_frames, width, height))
# T5 text-encoder configuration (consumed by build_module).
text_encoder_config = {
    "type": "t5",
    # local path to the T5 checkpoint used by OpenSora
    "from_pretrained": "/run/determined/workdir2/Open-Sora/T5/",
    "model_max_length": 300,
}
# Build the T5 text encoder used to condition the diffusion model on prompts.
text_encoder = build_module(text_encoder_config, MODELS)
# NOTE(review): `tokenizer` is never referenced again in this script — dead
# binding kept for compatibility; safe to remove if nothing external uses it.
tokenizer = text_encoder.t5.tokenizer
# Rectified-flow sampling scheduler configuration (consumed by build_module).
scheduler_config = {
    "type": "rflow",
    "use_timestep_transform": True,
    "num_sampling_steps": samplestep,  # = 30, set in the constants above
    "cfg_scale": 7.0,  # classifier-free guidance scale
}
# Build the sampling scheduler and the diffusion transformer. The DiT's input
# size is tied to the VAE latent grid, and its conditioning dimensions are tied
# to the text encoder's output width and maximum sequence length.
scheduler = build_module(scheduler_config, SCHEDULERS)
dit = build_module(
        model_config,
        MODELS,
        input_size=latent_size,
        in_channels=vae.out_channels,
        caption_channels=text_encoder.output_dim,
        model_max_length=text_encoder.model_max_length

    )
# Report total parameter count (all parameters, trainable or not).
pytorch_total_params = sum(p.numel() for p in dit.parameters())
print(f"total parameters:{pytorch_total_params}")


# save_dir = "lcm-open-sora-distilled_5-29/checkpoint-500"
# dit.load_adapter(save_dir)
#print("activate adapters:", dit.active_adapters())
#print(type(dit))
# Per-sample conditioning metadata passed to scheduler.sample as additional_args.
one_batch_size = 1

#prompts = "a clear stream flows slowly in the quiet forest"
model_args = dict()

# NOTE(review): `width` and `height` are assigned crosswise here (height gets
# the value of `width` = 720, width gets `height` = 1280). Combined with the
# swapped naming of the constants, the effective frame is 1280x720 — confirm.
model_args["height"] = torch.tensor([width], device="cuda").repeat(one_batch_size)
model_args["width"]  = torch.tensor([height], device="cuda").repeat(one_batch_size)
model_args["num_frames"] = torch.tensor([num_frames], device="cuda").repeat(one_batch_size)
# Aspect ratio consistent with the crosswise assignment above (720/1280).
model_args["ar"]  = torch.tensor([width / height], device="cuda").repeat(one_batch_size)
model_args["fps"] = torch.tensor([fps], device="cuda").repeat(one_batch_size)
# NOTE(review): this `prompts` list is dead code — it is unconditionally
# overwritten inside the generation loop below by each line of gallery.txt.
prompts = [
        #"boundless sea fulls of surging waves and mixed with wind and rain",
        #"an aerial view of a beach at sunset with orange and yellow hues lighting up the sky above",
        "a clear stream flows slowly in the quiet forest"
        #"portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography"

]
# Move all models to GPU in bfloat16 for inference.
dit.to(device="cuda", dtype=torch.bfloat16)
vae.to(device="cuda", dtype=dit.dtype)
# Share the DiT's caption embedder with the text encoder — presumably so the
# encoder can produce the null/unconditional embedding for CFG; confirm.
text_encoder.y_embedder = dit.y_embedder
text_encoder.t5.model.to(device="cuda", dtype=dit.dtype)
# Main generation loop: read one prompt per line from gallery.txt and sample
# one video per prompt, saving results under {output_dir}/{setting}/.
import time

i = 0  # running index used to name the output video files
# Explicit encoding so prompt files decode the same way on every platform.
with open("./assets/gallery.txt", "r", encoding="utf-8") as f:
#with open("prompts.txt", "r") as f:
    lines = f.readlines()

for prompt in lines:
    # BUG FIX: strip the trailing newline so it is not fed to the text encoder.
    prompt = prompt.strip()
    print(prompt)
    prompts = [prompt]
    with torch.no_grad():
        sample_steps = [samplestep]
        for sample_step in sample_steps:
            # Fresh Gaussian latent for each sample, shaped by the VAE latent grid.
            z = torch.randn(1, vae.out_channels, *latent_size, device="cuda", dtype=torch.bfloat16)
            start_time = time.time()
            samples = scheduler.sample(
                dit,
                text_encoder,
                z=z,
                # BUG FIX: text_preprocessing was previously called on the whole
                # LIST ([text_preprocessing(prompts)]), which stringifies it —
                # the model was conditioned on "['prompt']" including brackets
                # and quotes. Preprocess each prompt string individually.
                prompts=[text_preprocessing(p) for p in prompts],
                device=z.device,
                additional_args=model_args,
                #mask=torch.ones((1,28), device=z.device)
            )
            print(samples.std())
            print(samples.mean())
            videos = vae.decode(samples.to(torch.bfloat16))  # fixed typo: vidoes
            print("time consume:", time.time() - start_time)
            # exist_ok avoids the check-then-create race of the old
            # os.path.exists + os.makedirs pair.
            step_dir = f"{output_dir}/{setting}/{sample_step}_step"
            os.makedirs(step_dir, exist_ok=True)
            save_sample(videos[0], fps=fps, save_path=f"{step_dir}/{i}")
            print(i)
            i = i + 1

