import sys
sys.path.append(".")
import os
import time
from pprint import pformat

import colossalai
import torch
import torch.distributed as dist
from colossalai.cluster import DistCoordinator
from mmengine.runner import set_random_seed
from tqdm import tqdm

from opensora.acceleration.parallel_states import set_sequence_parallel_group
from opensora.datasets import save_sample
from opensora.datasets.aspect import get_image_size, get_num_frames
from opensora.models.text_encoder.t5 import text_preprocessing
from opensora.utils.config_utils import parse_configs
from torch.utils.data import DataLoader
import numpy as np
from opensora.registry import DATASETS, MODELS, SCHEDULERS, build_module
from opensora.utils.inference_utils import (
    add_watermark,
    append_generated,
    append_score_to_prompts,
    apply_mask_strategy,
    collect_references_batch,
    dframe_to_frame,
    extract_json_from_prompts,
    extract_prompts_loop,
    get_save_path_name,
    load_prompts,
    merge_prompt,
    prepare_multi_resolution_info,
    refine_prompts_by_openai,
    split_prompt,
)
from opensora.utils.misc import all_exists, create_logger, is_distributed, is_main_process, to_torch_dtype




def main():
    """Sample video latents for a dataset of text prompts and save them as .npz pairs.

    Pipeline: parse config -> build text encoder / VAE / diffusion model /
    scheduler -> iterate prompts from a ``TextDataset`` DataLoader -> sample
    latents with the configured ``sample_method`` -> for each prompt save the
    initial noise (``z1``) and the sampled latent (``z0``) to ``<save_path>.npz``
    and append a ``source_path,npz_path`` row to ``example.csv``.

    All configuration comes from ``parse_configs(training=False)``; the function
    takes no arguments and returns nothing.
    """
    torch.set_grad_enabled(False)  # pure inference: no autograd bookkeeping
    # ======================================================
    # configs & runtime variables
    # ======================================================
    # == parse configs ==
    cfg = parse_configs(training=False)

    # == device and dtype ==
    device = "cuda" if torch.cuda.is_available() else "cpu"
    cfg_dtype = cfg.get("dtype", "fp32")
    assert cfg_dtype in ["fp16", "bf16", "fp32"], f"Unknown mixed precision {cfg_dtype}"
    # BUGFIX: previously re-read the config here with a different default
    # ("bf16" vs the "fp32" validated above), so the dtype actually used could
    # silently disagree with the one that passed the assert. Use the validated value.
    dtype = to_torch_dtype(cfg_dtype)
    # TF32 speeds up fp32 matmuls/convs on Ampere+ with negligible accuracy loss.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True

    # == init distributed env ==
    if is_distributed():
        colossalai.launch_from_torch({})
        coordinator = DistCoordinator()
        enable_sequence_parallelism = coordinator.world_size > 1
        if enable_sequence_parallelism:
            set_sequence_parallel_group(dist.group.WORLD)
    else:
        coordinator = None
        enable_sequence_parallelism = False
    set_random_seed(seed=cfg.get("seed", 1024))

    # == init logger ==
    logger = create_logger()
    logger.info("Inference configuration:\n %s", pformat(cfg.to_dict()))
    verbose = cfg.get("verbose", 1)

    # ======================================================
    # build model & load weights
    # ======================================================
    logger.info("Building models...")
    # == build text-encoder and vae ==
    text_encoder = build_module(cfg.text_encoder, MODELS, device=device)
    vae = build_module(cfg.vae, MODELS).to(device, dtype).eval()

    # == prepare video size ==
    image_size = cfg.get("image_size", None)
    if image_size is None:
        resolution = cfg.get("resolution", None)
        aspect_ratio = cfg.get("aspect_ratio", None)
        assert (
            resolution is not None and aspect_ratio is not None
        ), "resolution and aspect_ratio must be provided if image_size is not provided"
        image_size = get_image_size(resolution, aspect_ratio)
    num_frames = get_num_frames(cfg.num_frames)

    # == build diffusion model ==
    input_size = (num_frames, *image_size)
    latent_size = vae.get_latent_size(input_size)
    model = (
        build_module(
            cfg.model,
            MODELS,
            input_size=latent_size,
            in_channels=vae.out_channels,
            caption_channels=text_encoder.output_dim,
            model_max_length=text_encoder.model_max_length,
            enable_sequence_parallelism=enable_sequence_parallelism,
        )
        .to(device, dtype)
        .eval()
    )
    text_encoder.y_embedder = model.y_embedder  # HACK: for classifier-free guidance

    # == build scheduler ==
    scheduler = build_module(cfg.scheduler, SCHEDULERS)

    # ======================================================
    # inference
    # ======================================================
    # == load prompts ==
    multi_resolution = cfg.get("multi_resolution", None)
    batch_size = cfg.get("batch_size", 1)
    num_sample = cfg.get("num_sample", 1)
    dataset = build_module(dict(type="TextDataset", data_path=cfg.prompt_path), DATASETS)
    prompt_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        drop_last=True,  # NOTE: trailing prompts that do not fill a full batch are skipped
        pin_memory=True,
        num_workers=1,
    )

    # == prepare output locations ==
    save_dir = cfg.save_dir
    os.makedirs(save_dir, exist_ok=True)
    sample_name = cfg.get("sample_name", None)
    prompt_as_path = cfg.get("prompt_as_path", False)

    # Dispatch table for the configured sampling method. Replaces the previous
    # if/elif chain; an unknown method now fails loudly instead of leaving
    # `samples` as None and crashing later on `samples[idx]`.
    sampler_by_method = {
        "lcm": scheduler.lcm_sample,
        "cfg": scheduler.sample,
        "no_cfg": scheduler.no_cfg_sample,
        "map_sample": scheduler.map_sample,
    }

    # == Iter over all samples ==
    start_idx = 0
    torch.manual_seed(1024)  # fixed seed so the initial noise z is reproducible
    for batch in tqdm(prompt_loader, desc="Steps"):
        # == prepare batch prompts ==
        batch_prompts = batch.pop("text")
        path = batch.pop("path")
        # Per-batch conditioning scores come back as size-1 tensors; take the
        # scalar of the first element (assumes they are shared across the batch
        # — TODO confirm against the dataset).
        flow = batch.pop("flow").cpu().numpy()[0]
        aes = batch.pop("aes").cpu().numpy()[0]
        fps = batch.pop("fps").cpu().numpy()[0]

        # == multi-resolution info ==
        model_args = prepare_multi_resolution_info(
            multi_resolution, len(batch_prompts), image_size, num_frames, fps, device, dtype
        )

        # BUGFIX: prompt processing is hoisted out of the num_sample loop. It
        # previously reassigned `batch_prompts` inside the loop, so for k > 0
        # the aes/flow scores were appended again to already-scored prompts.
        # `save_paths` below still uses the raw prompts (as on the first
        # iteration originally).
        processed_prompts = append_score_to_prompts(batch_prompts, aes=aes, flow=flow)
        processed_prompts = [text_preprocessing(p) for p in processed_prompts]  # clean for T5

        # == Iter over number of sampling for one prompt ==
        for k in range(num_sample):
            # == prepare save paths ==
            save_paths = [
                get_save_path_name(
                    save_dir,
                    sample_name=sample_name,
                    sample_idx=start_idx + idx,
                    prompt=batch_prompts[idx],
                    prompt_as_path=prompt_as_path,
                    num_sample=num_sample,
                    k=k,
                )
                for idx in range(len(batch_prompts))
            ]

            # NOTE: Skip if the sample already exists.
            # This is useful for resuming sampling VBench.
            if prompt_as_path and all_exists(save_paths):
                continue

            # == sample latents ==
            sample_method = cfg.get("sample_method", None)
            if sample_method not in sampler_by_method:
                raise ValueError(
                    f"Unknown sample_method {sample_method!r}; "
                    f"expected one of {sorted(sampler_by_method)}"
                )
            z = torch.randn(len(processed_prompts), vae.out_channels, *latent_size, device=device, dtype=dtype)
            samples = sampler_by_method[sample_method](
                model,
                text_encoder,
                z=z,
                prompts=processed_prompts,
                device=device,
                additional_args=model_args,
                progress=verbose >= 2,
            )

            # == save (noise, latent) pairs ==
            if is_main_process():
                for idx, batch_prompt in enumerate(processed_prompts):
                    if verbose >= 2:
                        logger.info("Prompt: %s", batch_prompt)
                    save_path = save_paths[idx]
                    # z1 = the initial Gaussian noise, z0 = the sampled latent
                    # (the rectified pair used for downstream training).
                    np.savez(save_path, z1=z[idx].float().cpu(), z0=samples[idx].float().cpu())
                    with open("example.csv", "a") as f:
                        f.write(path[idx] + "," + save_path + ".npz\n")
        start_idx += len(batch_prompts)
    logger.info("Inference finished.")
    logger.info("Saved %s samples to %s", start_idx, save_dir)


if __name__ == "__main__":
    main()