# SPDX-License-Identifier: Apache-2.0
import os

import numpy as np
import pytest
import torch
from diffusers import AutoencoderKLWan

from fastvideo.configs.pipelines import PipelineConfig
from fastvideo.fastvideo_args import FastVideoArgs
from fastvideo.logger import init_logger
from fastvideo.models.loader.component_loader import VAELoader
from fastvideo.configs.models.vaes import WanVAEConfig
from fastvideo.utils import maybe_download_model
from torch.testing import assert_close

logger = init_logger(__name__)

# Single-process distributed rendezvous: the distributed_setup fixture expects
# these to be set before torch.distributed initializes.
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29503"

# Download (or reuse a cached copy of) the reference Wan2.1 T2V weights; the
# VAE subfolder is what both implementations under test load from.
BASE_MODEL_PATH = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
MODEL_PATH = maybe_download_model(BASE_MODEL_PATH,
                                  local_dir=os.path.join("data", BASE_MODEL_PATH) # store in the large /workspace disk on Runpod
                                  )
VAE_PATH = os.path.join(MODEL_PATH, "vae")


@pytest.mark.usefixtures("distributed_setup")
def test_wan_vae():
    """Parity test: FastVideo's Wan VAE vs. the diffusers reference VAE.

    Loads the same pretrained weights into both implementations, runs the
    same random video through ``encode`` and ``decode``, and asserts the
    latent means and decoded frames agree within tight tolerances.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    precision = torch.float32
    precision_str = "fp32"
    args = FastVideoArgs(model_path=VAE_PATH,
                         pipeline_config=PipelineConfig(
                             vae_config=WanVAEConfig(),
                             vae_precision=precision_str))
    args.device = device
    args.vae_cpu_offload = False

    # FastVideo implementation under test.
    loader = VAELoader()
    model2 = loader.load(VAE_PATH, args)
    # Defaults to the original WanVAE feature-cache algorithm.
    assert model2.use_feature_cache

    # Reference implementation from diffusers, same weights and dtype.
    model1 = AutoencoderKLWan.from_pretrained(
        VAE_PATH, torch_dtype=precision).to(device).eval()

    # Identical random video input for both models: [B, C, T, H, W].
    batch_size = 1
    input_tensor = torch.randn(batch_size,
                               3,
                               81,
                               32,
                               32,
                               device=device,
                               dtype=precision)

    # Disable gradients for inference
    with torch.no_grad():
        # --- Encoding parity ---
        logger.info("Testing encoding...")
        latent1 = model1.encode(input_tensor).latent_dist
        latent2 = model2.encode(input_tensor)
        # Check if latents have the same shape
        assert latent1.mean.shape == latent2.mean.shape, f"Latent shapes don't match: {latent1.mean.shape} vs {latent2.mean.shape}"
        # Check if latents are similar
        assert_close(latent1.mean, latent2.mean, atol=1e-4, rtol=1e-4)

        # --- Decoding parity ---
        logger.info("Testing decoding...")
        latent1_tensor = latent1.mode()
        # Denormalize the reference latents with diffusers' per-channel
        # latents_mean / latents_std before decoding.
        mean1 = (torch.tensor(model1.config.latents_mean).view(
            1, model1.config.z_dim, 1, 1, 1).to(input_tensor.device,
                                                input_tensor.dtype))
        std1 = (1.0 / torch.tensor(model1.config.latents_std).view(
            1, model1.config.z_dim, 1, 1, 1)).to(input_tensor.device,
                                                 input_tensor.dtype)
        latent1_tensor = latent1_tensor / std1 + mean1
        output1 = model1.decode(latent1_tensor).sample

        # FastVideo exposes the equivalent factors on its arch config.
        mean2 = model2.config.arch_config.shift_factor.to(
            input_tensor.device, input_tensor.dtype)
        std2 = model2.config.arch_config.scaling_factor.to(
            input_tensor.device, input_tensor.dtype)
        latent2_tensor = latent2.mode()
        latent2_tensor = latent2_tensor / std2 + mean2
        output2 = model2.decode(latent2_tensor)
        # Check if outputs have the same shape
        assert output1.shape == output2.shape, f"Output shapes don't match: {output1.shape} vs {output2.shape}"

        # Check if outputs are similar
        assert_close(output1, output2, atol=1e-5, rtol=1e-3)
