# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.

# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.

from PIL import Image
from hy3dshape.rembg import BackgroundRemover
from hy3dshape.pipelines import Hunyuan3DDiTFlowMatchingPipeline
import argparse
import torch
import os
from hy3dshape.distributed.parallel_mgr import ParallelConfig, init_parallel_env
import random
import numpy as np
from mindiesd import CacheConfig, CacheAgent
import time
from torch_npu.contrib import transfer_to_npu

def set_seed(seed):
    """Seed the torch, NumPy and Python RNGs for reproducible runs."""
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

def get_args():
    """Parse the command-line options for the shape-generation demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--use_attentioncache", action="store_true",
                        help="Run or not using attention cache")
    parser.add_argument("--use_cfg_parallel", action="store_true",
                        help="If use CFG parallel strategy")
    # All remaining options are plain integers; register them in one pass.
    for flag, default, msg in (
        ("--seed", 3407, "Global random seed"),
        ("--infer_times", 5, "infer times"),
        ("--infer_steps", 50, "The total iteration steps of model"),
        ("--start_step", 15, "The start iteration steps of cache"),
        ("--attentioncache_interval", 3, "The step interval of cache"),
        ("--end_step", 45, "The end iteration steps of cache"),
    ):
        parser.add_argument(flag, type=int, default=default, help=msg)
    return parser.parse_args()


def get_ranks():
    """Read distributed-launch rank information from the environment.

    Returns:
        tuple[int, int, int]: (world_size, local_rank, rank). Each value
        falls back to a single-process default (1, 0, 0) when the
        corresponding environment variable is unset.
    """
    # Use getenv with a default for all three variables. The original
    # os.environ["LOCAL_RANK"] raised KeyError when the script was run
    # without a distributed launcher (e.g. plain `python demo.py`), even
    # though the main block explicitly supports world_size == 1.
    world_size = int(os.getenv("WORLD_SIZE", 1))
    local_rank = int(os.getenv("LOCAL_RANK", 0))
    rank = int(os.getenv("RANK", 0))
    return world_size, local_rank, rank


def parallel_initialize(args):
    """Set up the HCCL process group and, if requested, CFG parallelism.

    Reads rank information from the environment, binds this process to its
    device, and joins the distributed process group. When
    ``args.use_cfg_parallel`` is set, additionally initializes the sequence/
    CFG parallel environment via ``init_parallel_env``.
    """
    world_size, local_rank, rank = get_ranks()
    # Bind the device first so the process group is created on the right one.
    torch.cuda.set_device(local_rank)
    torch.distributed.init_process_group(
        backend="hccl",
        init_method="env://",
        rank=rank,
        world_size=world_size,
    )

    if not args.use_cfg_parallel:
        return

    # CFG parallel splits the world in two; the remainder becomes the
    # sequence-parallel (ulysses) degree, with a floor of 1 at world_size 2.
    sp_degree = 1 if world_size == 2 else world_size // 2
    init_parallel_env(ParallelConfig(
        sp_degree=sp_degree,
        ulysses_degree=sp_degree,
        ring_degree=1,
        tp_degree=1,
        use_cfg_parallel=True,
        world_size=world_size,
    ))


if __name__ == "__main__":
    args = get_args()
    set_seed(args.seed)
    world_size, local_rank, rank = get_ranks()

    # Only set up the process group when launched with multiple processes.
    if world_size > 1:
        parallel_initialize(args)

    model_path = 'tencent/Hunyuan3D-2.1'
    pipeline_shapegen = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(model_path)

    # Attention cache: one shared CacheAgent is attached to every DiT block.
    if args.use_attentioncache:
        config = CacheConfig(
            method="attention_cache",
            blocks_count=len(pipeline_shapegen.model.blocks),
            steps_count=args.infer_steps,
            step_start=args.start_step,
            step_interval=args.attentioncache_interval,
            step_end=args.end_step
        )
    else:
        # No start/interval/end: the agent is still required by the blocks,
        # but never skips a step.
        config = CacheConfig(
            method="attention_cache",
            blocks_count=len(pipeline_shapegen.model.blocks),
            steps_count=args.infer_steps
        )
    cache_agent = CacheAgent(config)
    for block in pipeline_shapegen.model.blocks:
        block.cache = cache_agent

    image_path = 'demos/demo.png'

    # Check the mode BEFORE any conversion: the original called
    # .convert("RGBA") first, which made the 'RGB' check (and therefore the
    # background removal) unreachable dead code.
    image = Image.open(image_path)
    if image.mode == 'RGB':
        rembg = BackgroundRemover()
        image = rembg(image)

    # NOTE: the original re-assigned `image = image_path` here, discarding
    # the loaded / background-removed image entirely; pass the processed
    # image to the pipeline instead.
    for _ in range(args.infer_times):
        start = time.time()
        mesh = pipeline_shapegen(image=image, use_cfg_parallel=args.use_cfg_parallel)[0]
        # Stop the clock before export so the printed figure is inference
        # time only, as the message claims.
        end = time.time()
        if world_size == 1 or (world_size > 1 and local_rank == 0):
            mesh.export('demo.glb')
            print(f"Shape Inference Times: {end - start}s.")