import sys
import argparse
sys.path.append("./")

from src.transformer import SymmetricTransformer2DModel
from src.pipeline import UnifiedPipeline
from src.scheduler import Scheduler
from train.trainer_utils import load_images_to_tensor

import torch
from transformers import (
    CLIPTextModelWithProjection,
    CLIPTokenizer,
)
from diffusers import VQModel


def parse_args():
    """Parse and return the command-line options for Meissonic inference."""
    parser = argparse.ArgumentParser(description="Run Meissonic inference.")
    # Flag -> add_argument keyword arguments; declared as data so the
    # option table reads at a glance.
    option_table = {
        "--model_path": dict(type=str, default="MeissonFlow/Meissonic"),
        "--transformer_path": dict(type=str, default="MeissonFlow/Meissonic"),
        "--image_path_or_dir": dict(type=str, default="./output"),
        "--resolution": dict(type=int, default=512),
        "--steps": dict(type=int, default=64),
        "--cfg": dict(type=float, default=9.0),
        "--device": dict(type=str, default="cuda"),
    }
    for flag, kwargs in option_table.items():
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()


def main():
    """Assemble the Meissonic pipeline and run both inference directions.

    Loads the transformer, VQ-VAE, CLIP text encoder/tokenizer and scheduler
    from the paths given on the command line, then runs the pipeline twice:
    once color->gray (conditioned on `images`) and once gray->color
    (conditioned on `images_gray`). Results are written as PNG files under
    `inference_image/`.
    """
    import os  # local import: only used to ensure the output directory exists

    args = parse_args()
    # NOTE(review): args.cfg is parsed but never forwarded to the pipeline;
    # confirm whether guidance scale should be passed to `pipe(...)`.

    model = SymmetricTransformer2DModel.from_pretrained(args.transformer_path)
    vq_model = VQModel.from_pretrained(args.model_path, subfolder="vqvae")
    text_encoder = CLIPTextModelWithProjection.from_pretrained(args.model_path, subfolder="text_encoder")
    tokenizer = CLIPTokenizer.from_pretrained(args.model_path, subfolder="tokenizer")
    scheduler = Scheduler.from_pretrained(args.model_path, subfolder="scheduler")

    pipe = UnifiedPipeline(
        vqvae=vq_model,
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        transformer=model,
        scheduler=scheduler,
    )
    pipe.to(args.device)

    # Best-effort loading of conditioning images: if none can be loaded the
    # pipeline runs unconditioned. Unlike a bare `except:`, this no longer
    # swallows KeyboardInterrupt/SystemExit and reports why loading failed.
    try:
        images, images_gray = load_images_to_tensor(
            args.image_path_or_dir, target_size=(args.resolution, args.resolution)
        )
    except Exception as err:
        print(f"Warning: could not load images from {args.image_path_or_dir}: {err}")
        images = None
        images_gray = None

    # The save calls below fail if the directory is missing; create it up front.
    os.makedirs("inference_image", exist_ok=True)

    ### image to gray ####
    output_gray = pipe(
        image=images,
        height=args.resolution,
        width=args.resolution,
        num_inference_steps=args.steps,
        generator=torch.manual_seed(42),  # fixed seed for reproducible sampling
    )

    for i, image in enumerate(output_gray.images_gray):
        image.save(f"inference_image/{i}_512_image_gray.png")

    ### gray to image ####
    output = pipe(
        image_gray=images_gray,
        height=args.resolution,
        width=args.resolution,
        num_inference_steps=args.steps,
        generator=torch.manual_seed(42),  # same seed so both passes are comparable
    )

    for i, image in enumerate(output.images):
        image.save(f"inference_image/{i}_512_image.png")

if __name__ == "__main__":
    main()
