# Copyright 2024 The HuggingFace Team. All rights reserved.
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: Apache-2.0
# This file has been modified by [ByteDance Ltd. and/or its affiliates.] on 2025

# Original file was released under Apache-2.0, with the full license text
# available at https://github.com/hpcaitech/Open-Sora/blob/main/LICENSE.

# This modified file is released under the same license.


import sys
from videosys import OpenSoraConfig, VideoSysEngine
import torch
from einops import rearrange
from videosys.models.transformers.open_sora_transformer_3d import t2i_modulate, auto_grad_checkpoint
from videosys.core.comm import all_to_all_with_pad, gather_sequence, get_pad, set_pad, split_sequence
import numpy as np
from videosys.utils.utils import batch_func
from functools import partial
import os
import copy
import json
import tqdm
import time
from safetensors.torch import save_file, load_file
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def read_prompt_list(prompt_list_path):
    """Load a VBench-style JSON prompt file and return the English prompts.

    The file is expected to contain a JSON array of objects, each carrying a
    ``"prompt_en"`` key; only those string values are returned, in file order.
    """
    with open(prompt_list_path, "r") as fp:
        entries = json.load(fp)
    return [entry["prompt_en"] for entry in entries]


def generate_func(pipeline, prompt_list, output_dir, tensor_folder_name=None, num_frames=51, loop: int = 5, kwargs: dict = None):
    """Generate ``loop`` videos per prompt with ``pipeline`` and save them to ``output_dir``.

    Already-existing output files are skipped. When the scheduler class carries a
    truthy ``save_error`` flag, its ``save_cache`` tensors are dumped per prompt/seed
    under ``output_dir/tensor_folder_name``.

    Args:
        pipeline: a VideoSysEngine; must expose ``generate``, ``save_video`` and
            ``driver_worker.scheduler``.
        prompt_list: text prompts to render.
        output_dir: destination directory (created if missing).
        tensor_folder_name: subfolder for error tensors; only used when
            ``save_error`` is set (must be non-None in that case).
        num_frames: frames per generated video.
        loop: number of seeds (0..loop-1) per prompt.
        kwargs: extra keyword arguments forwarded to ``pipeline.generate``.
    """
    # BUG FIX: the original signature used a mutable default (`kwargs: dict = {}`)
    # and then mutated it, leaking state across calls and into caller dicts.
    # Copy instead of mutating in place.
    kwargs = dict(kwargs) if kwargs else {}
    kwargs["verbose"] = False
    os.makedirs(output_dir, exist_ok=True)

    save_error = getattr(pipeline.driver_worker.scheduler, "save_error", False)
    tensor_folder_path = None
    if save_error:
        tensor_folder_path = os.path.join(output_dir, tensor_folder_name)
        os.makedirs(tensor_folder_path, exist_ok=True)

    for cnt, prompt in enumerate(tqdm.tqdm(prompt_list)):
        for l in range(loop):
            video_path = os.path.join(output_dir, f"{prompt}-{l}.mp4")
            if os.path.exists(video_path):
                print(f'{prompt}-{l} exists')
                continue
            # The patched attention code reads this env var to locate the
            # per-seed adjustment tensors.
            os.environ['ADJUST_PATH'] = os.path.join(os.getenv('ADJUST_FOLDER', ''), f'K_hw_{l}.safetensors')
            video = pipeline.generate(prompt, seed=l, num_frames=num_frames, **kwargs).video[0]
            pipeline.save_video(video, video_path)
            if save_error:
                save_file(
                    pipeline.driver_worker.scheduler.__class__.save_cache,
                    os.path.join(tensor_folder_path, f"{cnt}_seed{l}.safetensors"),
                )





def ertacache_forward(
    self, x, timestep, all_timesteps, y, mask=None, x_mask=None, fps=None, height=None, width=None, **kwargs
):
    """STDiT3 forward pass augmented with ErTaCache step skipping.

    This function is monkey-patched onto the transformer class (see
    ``eval_ertacache``), so ``self`` is the STDiT3 model. When
    ``self.enable_ertacache`` is set and the current denoising step index is in
    ``self.skip_list``, the spatial/temporal transformer blocks are skipped and
    the residual cached from the last computed step is re-applied instead.

    Args:
        x: input latent, shape [B, C, T, H, W] (5-D, see the size() unpack below).
        timestep: per-sample diffusion timestep tensor.
        all_timesteps: full schedule, used to locate the current step index.
        y: text-condition embedding; mask: its attention mask.
        x_mask: temporal mask for variable-length videos (may be None).
        fps, height, width: conditioning scalars/tensors from the pipeline.

    Returns:
        Denoised latent as float32, gathered across parallel groups if needed.
    """
    # === Split batch across the context-parallel group ===
    if self.parallel_manager.cp_size > 1:
        x, timestep, y, x_mask, mask = batch_func(
            partial(split_sequence, process_group=self.parallel_manager.cp_group, dim=0),
            x,
            timestep,
            y,
            x_mask,
            mask,
        )

    dtype = self.x_embedder.proj.weight.dtype
    B = x.size(0)
    x = x.to(dtype)
    timestep = timestep.to(dtype)
    y = y.to(dtype)

    # === get pos embed ===
    _, _, Tx, Hx, Wx = x.size()
    T, H, W = self.get_dynamic_size(x)
    S = H * W
    base_size = round(S**0.5)
    resolution_sq = (height[0].item() * width[0].item()) ** 0.5
    scale = resolution_sq / self.input_sq_size
    pos_emb = self.pos_embed(x, H, W, scale=scale, base_size=base_size)

    # === get timestep embed ===
    t = self.t_embedder(timestep, dtype=x.dtype)  # [B, C]
    fps = self.fps_embedder(fps.unsqueeze(1), B)
    t = t + fps
    t_mlp = self.t_block(t)
    t0 = t0_mlp = None
    if x_mask is not None:
        # Zero-timestep embedding for masked (conditioning) frames.
        t0_timestep = torch.zeros_like(timestep)
        t0 = self.t_embedder(t0_timestep, dtype=x.dtype)
        t0 = t0 + fps
        t0_mlp = self.t_block(t0)

    # === get y embed ===
    if self.config.skip_y_embedder:
        y_lens = mask
        if isinstance(y_lens, torch.Tensor):
            y_lens = y_lens.long().tolist()
    else:
        y, y_lens = self.encode_text(y, mask)

    # === get x embed ===
    x = self.x_embedder(x)  # [B, N, C]
    x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
    x = x + pos_emb

    # should_calc tracks whether the transformer blocks were actually executed
    # this step. BUG FIX: the original referenced `should_calc` in the gather
    # section below without ever assigning it, raising NameError whenever
    # sequence parallelism (sp_size > 1) was combined with ErTaCache.
    should_calc = True
    if self.enable_ertacache:
        if all_timesteps.index(timestep[0]) in self.skip_list:
            # Skipped step: reuse the cached residual; x was never split across
            # the sequence-parallel group, so no gather is needed afterwards.
            should_calc = False
            x = rearrange(x, "B T S C -> B (T S) C", T=T, S=S)
            x = x + self.previous_residual
            self.skip_cnt += 1
            logger.info(f"skip timestep:{all_timesteps.index(timestep[0])}, l1_distance:{self.accumulated_rel_l1_distance}")

        else:
            if self.parallel_manager.sp_size > 1:
                set_pad("temporal", T, self.parallel_manager.sp_group)
                set_pad("spatial", S, self.parallel_manager.sp_group)
                x = split_sequence(x, self.parallel_manager.sp_group, dim=1, grad_scale="down", pad=get_pad("temporal"))
                T = x.shape[1]
                x_mask_org = x_mask
                x_mask = split_sequence(
                    x_mask, self.parallel_manager.sp_group, dim=1, grad_scale="down", pad=get_pad("temporal")
                )

            x = rearrange(x, "B T S C -> B (T S) C", T=T, S=S)
            # Snapshot the block input so the residual (output - input) can be
            # cached and replayed on skipped steps.
            origin_x = x.clone().detach()
            for spatial_block, temporal_block in zip(self.spatial_blocks, self.temporal_blocks):
                x = auto_grad_checkpoint(
                    spatial_block,
                    x,
                    y,
                    t_mlp,
                    y_lens,
                    x_mask,
                    t0_mlp,
                    T,
                    S,
                    timestep,
                    all_timesteps=all_timesteps,
                )

                x = auto_grad_checkpoint(
                    temporal_block,
                    x,
                    y,
                    t_mlp,
                    y_lens,
                    x_mask,
                    t0_mlp,
                    T,
                    S,
                    timestep,
                    all_timesteps=all_timesteps,
                )

            self.previous_residual = x - origin_x

    else:
        if self.parallel_manager.sp_size > 1:
            set_pad("temporal", T, self.parallel_manager.sp_group)
            set_pad("spatial", S, self.parallel_manager.sp_group)
            x = split_sequence(x, self.parallel_manager.sp_group, dim=1, grad_scale="down", pad=get_pad("temporal"))
            T = x.shape[1]
            x_mask_org = x_mask
            x_mask = split_sequence(
                x_mask, self.parallel_manager.sp_group, dim=1, grad_scale="down", pad=get_pad("temporal")
            )
        x = rearrange(x, "B T S C -> B (T S) C", T=T, S=S)
        for spatial_block, temporal_block in zip(self.spatial_blocks, self.temporal_blocks):
            x = auto_grad_checkpoint(
                spatial_block,
                x,
                y,
                t_mlp,
                y_lens,
                x_mask,
                t0_mlp,
                T,
                S,
                timestep,
                all_timesteps=all_timesteps,
            )

            x = auto_grad_checkpoint(
                temporal_block,
                x,
                y,
                t_mlp,
                y_lens,
                x_mask,
                t0_mlp,
                T,
                S,
                timestep,
                all_timesteps=all_timesteps,
            )

    # === Gather the sequence-parallel split back (only if blocks ran) ===
    if self.parallel_manager.sp_size > 1:
        if self.enable_ertacache:
            if should_calc:
                x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
                self.previous_residual = rearrange(self.previous_residual, "B (T S) C -> B T S C", T=T, S=S)
                x = gather_sequence(x, self.parallel_manager.sp_group, dim=1, grad_scale="up", pad=get_pad("temporal"))
                # The cached residual must live in the gathered layout so it can
                # be added to un-split x on skipped steps.
                self.previous_residual = gather_sequence(self.previous_residual, self.parallel_manager.sp_group, dim=1, grad_scale="up", pad=get_pad("temporal"))
                T, S = x.shape[1], x.shape[2]
                x = rearrange(x, "B T S C -> B (T S) C", T=T, S=S)
                self.previous_residual = rearrange(self.previous_residual, "B T S C -> B (T S) C", T=T, S=S)
                x_mask = x_mask_org
        else:
            x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
            x = gather_sequence(x, self.parallel_manager.sp_group, dim=1, grad_scale="up", pad=get_pad("temporal"))
            T, S = x.shape[1], x.shape[2]
            x = rearrange(x, "B T S C -> B (T S) C", T=T, S=S)
            x_mask = x_mask_org

    # === final layer ===
    x = self.final_layer(x, t, x_mask, t0, T, S)
    x = self.unpatchify(x, T, H, W, Tx, Hx, Wx)
    x = x.to(torch.float32)

    # === Gather Output across the context-parallel group ===
    if self.parallel_manager.cp_size > 1:
        x = gather_sequence(x, self.parallel_manager.cp_group, dim=0)
    # Report the overall skip ratio once, on the last scheduled step.
    if timestep[0] == all_timesteps[-1]:
        logger.info(f"skip {self.skip_cnt} steps, skip ratio: {self.skip_cnt / len(all_timesteps)}")
    return x

            
            

def eval_ertacache(prompt_list, num_frames, loop=1):
    """Build an OpenSora VideoSys engine, patch ErTaCache state onto its
    transformer/scheduler classes, and render every prompt in ``prompt_list``.

    Behavior is driven by environment variables: CKPT_DIR / TEXT_DIR (model
    paths), EVAL_ERTACACHE (enable caching), SKIP_LIST (JSON list of step
    indices to skip), SAVE_ERROR (dump scheduler cache tensors), OUTPUT_DIR.
    """
    ckpt_dir = os.path.abspath(os.getenv('CKPT_DIR', './hpcai-tech'))
    text_encoder_dir = os.path.abspath(os.getenv('TEXT_DIR', './DeepFloyd'))

    config = OpenSoraConfig(
        transformer=f"{ckpt_dir}/OpenSora-STDiT-v3",
        vae=f"{ckpt_dir}/OpenSora-VAE-v1.2",
        text_encoder=f"{text_encoder_dir}/t5-v1_1-xxl",
        enable_flash_attn=False,
    )
    engine = VideoSysEngine(config)
    logger.info(config)

    transformer_cls = engine.driver_worker.transformer.__class__
    scheduler_cls = engine.driver_worker.scheduler.__class__

    # Monkey-patch the cached forward and all ErTaCache bookkeeping state
    # directly onto the transformer class.
    patch_state = {
        "enable_ertacache": int(os.getenv("EVAL_ERTACACHE", 0)) == 1,
        "skip_list": json.loads(os.getenv('SKIP_LIST', '[]')),
        "cache_list": {i: 0 for i in range(30)},
        "accumulated_rel_l1_distance": 0,
        "previous_modulated_input": None,
        "previous_residual": None,
        "forward": ertacache_forward,
        "skip_cnt": 0,
        "video_index": 0,
    }
    for attr_name, attr_value in patch_state.items():
        setattr(transformer_cls, attr_name, attr_value)

    scheduler_cls.save_error = int(os.getenv("SAVE_ERROR", 0)) == 1
    scheduler_cls.save_cache = {}
    scheduler_cls.skip_list = transformer_cls.skip_list

    output_dir = os.getenv('OUTPUT_DIR', "./samples/tmp")
    tensor_folder_name = 'ertacache_tensor' if transformer_cls.enable_ertacache else 'tensor'
    generate_func(engine, prompt_list, output_dir, tensor_folder_name, num_frames=num_frames, loop=loop)




    
if __name__ == "__main__":
    # DATA_TEST bounds how many prompts are evaluated (default 0 -> empty run).
    sample_count = int(os.getenv('DATA_TEST', 0))
    prompts = read_prompt_list(os.getenv('PROMPT_PATH', './VBench_full_info.json'))[:sample_count]
    eval_ertacache(prompts, num_frames=int(os.getenv('NUM_FRAMES', 51)), loop=1)