#!/usr/bin/env python
# coding=utf-8
import sys
sys.path.append('.')
from opensora.registry import MODELS, SCHEDULERS, build_module
import torch
import torch.distributed as dist
from opensora.models.text_encoder.t5 import text_preprocessing
import numpy as np
from opensora.datasets import IMG_FPS, get_transforms_video, read_video, temporal_random_crop
import os
import gc
def timestep_transform(
    t,
    model_kwargs,
    base_resolution=512 * 512,
    base_num_frames=1,
    scale=1.0,
    num_timesteps=1,
):
    """Warp a diffusion timestep according to the sample's spatial/temporal size.

    Larger resolutions / longer clips push the (normalized) timestep towards
    noisier values via the rational map ``r*t / (1 + (r-1)*t)``.

    Args:
        t: timestep tensor in ``[0, num_timesteps]``.
        model_kwargs: dict with tensor entries ``height``, ``width`` and
            ``num_frames``.
        base_resolution: reference pixel count the ratio is measured against.
        base_num_frames: reference (latent) frame count.
        scale: extra multiplicative factor on the ratio.
        num_timesteps: scale of the incoming/outgoing timestep range.

    Returns:
        The transformed timestep tensor, back in ``[0, num_timesteps]``.
    """
    normalized_t = t / num_timesteps
    pixel_count = model_kwargs["height"] * model_kwargs["width"]
    spatial_ratio = (pixel_count / base_resolution).sqrt()
    # NOTE: currently, we do not take fps into account
    # NOTE: the 17 -> 5 mapping is a hardcoded temporal reduction; it should
    # match the temporal reduction factor of the VAE.
    frames = model_kwargs["num_frames"]
    if frames[0] == 1:
        # Single-frame (image) batch: no temporal scaling.
        effective_frames = torch.ones_like(frames)
    else:
        effective_frames = frames // 17 * 5
    temporal_ratio = (effective_frames / base_num_frames).sqrt()

    combined_ratio = spatial_ratio * temporal_ratio * scale
    warped = combined_ratio * normalized_t / (1 + (combined_ratio - 1) * normalized_t)

    return warped * num_timesteps

def calculate_similarity(matrix1, matrix2):
    """Return the cosine similarity between two tensors, compared as flat vectors.

    Both inputs are flattened and cast to float32 before comparison.

    Args:
        matrix1: first tensor (any shape).
        matrix2: second tensor (same element count as ``matrix1``).

    Returns:
        A scalar tensor with the cosine similarity in ``[-1, 1]``.
    """
    flat_a = matrix1.reshape(-1).float()
    flat_b = matrix2.reshape(-1).float()

    # Cosine similarity = <a, b> / (|a| * |b|).
    numerator = torch.dot(flat_a, flat_b)
    denominator = torch.norm(flat_a) * torch.norm(flat_b)

    return numerator / denominator

def get_video(video_path, num_frames, width, height):
    """Load a video clip and prepare it as a batched tensor on the GPU.

    Args:
        video_path: path to the source video file.
        num_frames: number of frames to sample via temporal crop.
        width, height: spatial size passed to the center-crop transform.

    Returns:
        (video, video_fps): `video` is the transformed clip on CUDA with a
        leading batch dim — presumably (1, C, T, H, W); TODO confirm against
        read_video's layout. `video_fps` is the source frame rate, defaulting
        to 24 when the container reports none.
    """
    vframes, vinfo = read_video(video_path, backend="av")
    # Idiomatic fallback lookup (replaces the `if "video_fps" in vinfo` ternary).
    video_fps = vinfo.get("video_fps", 24)
    # Sample `num_frames` frames with frame_interval=1 at a random offset.
    video = temporal_random_crop(vframes, num_frames, 1)
    # Center-crop / normalize.
    # NOTE(review): the transform receives (width, height); confirm whether
    # get_transforms_video expects (height, width) — harmless here since both
    # callers use square sizes.
    transform = get_transforms_video("center", (width, height))
    video = transform(video)
    # Presumably (T, C, H, W) -> (C, T, H, W); TODO confirm.
    video = video.permute(1, 0, 2, 3)
    return video.unsqueeze(dim=0).cuda(), video_fps

# --- Script-level configuration for the reconstruction experiment ---
width = 256   # target frame width in pixels
height = 256  # target frame height in pixels
num_frames = 16  # frames sampled from the source video
fps=8  # NOTE(review): placeholder only — overwritten below with the real fps returned by get_video()
device = "cuda:0"
video_path = "/run/determined/NAS1/Open-Sora-Plan-v1.0.0/data/pexels/landscape/landscape_3148931_034.mp4"
prompt = "a breathtaking aerial view of a winding river, nestled amidst lush greenery. The river, exhibiting a dark blue hue, meanders through the landscape, carving its path through the verdant terrain. On either side of the river, you can see patches of vibrant green rice fields, a testament to the agricultural bounty of the region.   The river is encircled by an abundance of trees and vegetation, painting a picture of a thriving ecosystem. The scene is taken from a high vantage point, providing a bird's eye view of the river and its surroundings. This perspective allows for a comprehensive view of the landscape, highlighting the interplay between the river, the rice fields, and the surrounding vegetation.   The scene does not provide any specific details that could be used to identify the exact location of this landmark. However, the presence of rice fields suggests that it could be located in a region where rice cultivation is prevalent. The winding river and the surrounding greenery are indicative of a rich, fertile landscape. The scene does not contain any discernible text or countable objects. The relative positions of the objects suggest a harmonious coexistence between the river, the rice fields, and the vegetation, with each element contributing to the overall beauty of the scene."
# Load the reference clip; `fps` is replaced by the clip's actual frame rate.
video, fps = get_video(video_path, num_frames, width, height)
video = video.to(dtype=torch.bfloat16)  # match the bf16 compute dtype used below
one_batch_size = 1  # batch size used when tiling the conditioning tensors

# --- Build the frozen VAE, T5 text encoder and scheduler used by draw() ---
vae_config = dict(
    type="OpenSoraVAE_V1_2",
    from_pretrained="/run/determined/NAS1/OpenSora-VAE-v1.2/",
    micro_frame_size=17,
    micro_batch_size=4,
)
vae = build_module(vae_config, MODELS)
# NOTE(review): called as (T, W, H); harmless for the square 256x256 clip, but
# confirm whether get_latent_size expects (T, H, W).
latent_size = vae.get_latent_size((num_frames, width, height))
# Fixed Gaussian starting noise shared by every rollout, shaped to the latent size.
noise = torch.randn((1, 4, 4, 32, 32)).to(device)
text_encoder_config = dict(
    type="t5",
    from_pretrained="/run/determined/NAS1/Open-Sora/T5/",
    model_max_length=300,
)
text_encoder = build_module(text_encoder_config, MODELS)
tokenizer = text_encoder.t5.tokenizer
scheduler_config = dict(
    type="rflow",
    use_timestep_transform=True,
    cfg_scale=7.0,
)
scheduler = build_module(scheduler_config, SCHEDULERS)
vae.to(device=device, dtype=torch.bfloat16)
# T5 stays in fp32 (bf16 cast deliberately disabled).
text_encoder.t5.model.to(device=device)
model_args = text_encoder.encode(text_preprocessing(prompt))
# BUG FIX: "height" was previously filled from `width` and "width" from
# `height` (swapped). Numerically harmless while both are 256, but wrong for
# any non-square configuration.
model_args["height"] = torch.tensor([height], device=device).repeat(one_batch_size)
model_args["width"] = torch.tensor([width], device=device).repeat(one_batch_size)
model_args["num_frames"] = torch.tensor([num_frames], device=device).repeat(one_batch_size)
# NOTE(review): confirm whether the model expects aspect ratio as w/h or h/w.
model_args["ar"] = torch.tensor([width / height], device=device).repeat(one_batch_size)
model_args["fps"] = torch.tensor([fps], device=device).repeat(one_batch_size)
# NOTE(review): never used below — draw() builds its own config; kept for reference.
target_model_config = dict(
    type="STDiT3-XL/2",
    from_pretrained="/run/determined/NAS1/OpenSora-STDiT-v3",
    qk_norm=True,
    enable_flash_attn=False,
    enable_layernorm_kernel=True,
)
def trapezoidal_integration(x, y):
    """Numerically integrate ``y`` over ``x`` using the trapezoidal rule.

    Args:
        x: 1-D array of sample positions (need not be evenly spaced).
        y: 1-D array of sample values, same length as ``x``.

    Returns:
        The scalar trapezoidal estimate of the integral.
    """
    widths = np.diff(x)
    midpoints = (y[:-1] + y[1:]) / 2
    return np.sum(widths * midpoints)

def draw(checkpoint_name, pic_name, sample_steps):
    """Evaluate one STDiT3 checkpoint by a fixed-noise Euler rollout and plot diagnostics.

    Builds the model from `checkpoint_name`, integrates from t=1000 down to 0
    with stride `sample_steps`, and saves a two-panel figure to `pic_name`:
    per-step cosine similarity against the reference velocity, and per-step
    prediction magnitude, each annotated with its (normalized) integral.

    NOTE(review): depends on module-level globals — vae, video, noise,
    model_args, text_encoder, latent_size, device.
    """
    model_config = dict(
        type="STDiT3-XL/2",
        from_pretrained=checkpoint_name,
        qk_norm=True,
        enable_flash_attn=False,
        enable_layernorm_kernel=True,
    )
    dit = build_module(
            model_config,
            MODELS,
            input_size=latent_size,
            in_channels=vae.out_channels,
            caption_channels=text_encoder.output_dim,
            model_max_length=text_encoder.model_max_length
            )
    dit.to(device=device, dtype=torch.bfloat16)
    dit.eval()

    # pytorch_total_params = sum(p.numel() for p in dit.parameters())
    # print(f"total parameters:{pytorch_total_params}")

    # Encode the reference clip into the VAE latent space.
    latents = vae.encode(video).to(device=device, dtype=dit.dtype)
    # print(latents.shape)
    # Reference velocity: displacement from the fixed noise to the clip's latents.
    gt_v = (latents - noise).to(dtype=dit.dtype)
    similarities = []
    magnitude = []
    x_i = noise
    S = 0
    z_list = []
    # Euler rollout over descending timesteps 1000 -> 0 with stride sample_steps.
    for i in range(1000, 0, -sample_steps):
        s = torch.tensor([i], device=latents.device)
        #s = timestep_transform(s, model_args)
        with torch.no_grad():
            v_pred = dit(x_i, s, **model_args)
        B, C = v_pred.shape[:2]
        # The model emits 2*C channels; keep the first half as the velocity and
        # drop the rest — presumably a variance head; NOTE(review): confirm.
        v_pred, _ = torch.split(v_pred, C // 2, dim=1)
        z_list.append(v_pred)
        similarity = calculate_similarity(gt_v, v_pred)
        similarities.append(similarity.cpu().detach().numpy())
        mag = (v_pred).abs().mean()
        magnitude.append(mag.cpu().detach().numpy())
        # Euler update: advance the sample along the predicted velocity.
        x_i = x_i + sample_steps / 1000 * v_pred
        #gt_v = v_pred
    
    # S: mean squared gap between the total displacement (x_i - noise) and each
    # per-step prediction, scaled by 100 — a trajectory-straightness measure.
    S = 0
    for z in z_list:
        S += torch.mean((x_i - noise - z).pow(2))
    S = S / len(z_list) * 100
    import matplotlib.pyplot as plt
   
    x = range(1000, 0, -sample_steps)
    # Negated because x descends (dx < 0); normalized by the 1000-step range.
    integral_a = trapezoidal_integration(np.array(list(x)), np.array(similarities)) / 1000 * -1
    integral_m = trapezoidal_integration(np.array(list(x)), np.array(magnitude)) / 1000 * -1
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))

    # Plot the first subplot (cosine similarity per timestep)
    ax1.plot(x[0:], similarities[0:], label='Angles', color='b')
    ax1.set_title('Cos Similarity')
    ax1.set_xlabel('Range')
    ax1.set_ylabel('Similarity')
    ax1.legend()
    ax1.text(0.05, 0.95, f'Integral: {integral_a:.2f}', transform=ax1.transAxes, fontsize=12, verticalalignment='top')
    # Plot the second subplot (prediction magnitude per timestep)
    ax2.plot(x, magnitude, label='Magnitude', color='r')
    ax2.set_title('Magnitude')
    ax2.set_xlabel('Range')
    ax2.set_ylabel('Magnitude')
    ax2.legend()
    ax2.text(0.05, 0.95, f'Integral: {integral_m:.2f}', transform=ax2.transAxes, fontsize=12, verticalalignment='top')
    fig.text(0.5, 0.01, f'S = {S:.2f}', ha='center', fontsize=12)
    # Adjust layout before saving
    plt.tight_layout()
    plt.savefig(pic_name)
    # Free GPU memory before the next checkpoint is loaded.
    del dit
    gc.collect()
    torch.cuda.empty_cache()


# --- Sweep every saved checkpoint of one training setting and render plots ---
base_dir = "/run/determined/NAS1/determined/checkpoints"
output_dir = "/home/yunzhu/opensora_distill/assets/pics/"
setting = "flow_progressive_delta"

checkpoint_dir = os.path.join(base_dir, setting)
# Collect "<checkpoint-*>/model" sub-paths; sorted for a deterministic order
# (os.listdir order is arbitrary).
checkpoints = [
    os.path.join(entry, "model")
    for entry in sorted(os.listdir(checkpoint_dir))
    if "checkpoint" in entry
]


print(checkpoints)

# The output directory is loop-invariant: create it once up front.
# exist_ok=True replaces the racy exists()-then-makedirs check.
op = os.path.join(output_dir, setting + "_gt_real")
os.makedirs(op, exist_ok=True)

sample_steps = [10, 30, 100, 200]  # Euler strides (out of 1000 timesteps) to evaluate

for checkpoint in checkpoints:
    print(f"processing {checkpoint}")
    # "checkpoint-<step>/model" -> "<step>"
    step = checkpoint.split("/")[0].split("-")[1]
    checkpoint = os.path.join(checkpoint_dir, checkpoint)
    for sample_step in sample_steps:
        pic_name = "_".join([step, str(sample_step)]) + ".png"
        draw(checkpoint, os.path.join(op, pic_name), sample_step)