#!/usr/bin/env python  
# -*- coding:utf-8 _*-
""" 
@file: canny2image_TRT2.py 
@time: 2023/07/16
@author: xingwg 
@contact: xwg031459@163.com
@software: PyCharm 
"""
import time
import ctypes
import einops
import random
import torch
import numpy as np
from cuda import cudart

from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from annotator.canny import CannyDetector
from transformers import CLIPTokenizer, CLIPTextModel
from cldm.hack import disable_verbosity
from ldm.modules.diffusionmodules.util import make_ddim_timesteps

from trt_infer import TRTInfer

disable_verbosity()


# Register the custom TensorRT plugin ops before any engine is deserialized.
# NOTE(review): LoadLibrary raises OSError if ./libplugin.so is missing —
# presumably intentional fail-fast; confirm the .so ships next to this script.
plugin_lib = "./libplugin.so"
ctypes.cdll.LoadLibrary(plugin_lib)
print("load plugin:", plugin_lib)


class hackathon(object):
    """End-to-end canny-to-image pipeline running every stage through
    pre-built TensorRT engines: CLIP text encoding, ControlNet hint
    encoding, the fused ControlNet+UNet DDIM loop, and the VAE decoder.
    """

    def __init__(self, num_ddim_timesteps=12, num_ddpm_timesteps=1000):
        """Load the Canny detector, CLIP tokenizer and the four TensorRT
        engines, and precompute the reversed DDIM timestep schedule.

        Args:
            num_ddim_timesteps: number of DDIM sampling steps.
            num_ddpm_timesteps: length of the underlying DDPM schedule.
        """
        self.canny = CannyDetector()
        self.tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
        # The PyTorch text encoder is replaced by the TensorRT "clip" engine.
        # self.text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").cuda()
        self.clip = TRTInfer("clip_fp16.trt")
        self.guided_hint = TRTInfer("hint_fp16.trt")
        self.control_net = TRTInfer("controlnet_fp16.trt")
        self.decoder = TRTInfer("decoder_fp16.trt")

        self.ddim_timesteps = make_ddim_timesteps(
            ddim_discr_method="uniform", num_ddim_timesteps=num_ddim_timesteps, num_ddpm_timesteps=num_ddpm_timesteps, verbose=False)

        # Sampling walks from the noisiest timestep down toward t=0, so
        # reverse the ascending schedule returned by make_ddim_timesteps.
        self.ddim_timesteps = np.flip(self.ddim_timesteps)

    def initialize(self):
        # All engines are loaded eagerly in __init__; nothing to do here.
        pass

    def process(self, input_image, prompt, a_prompt, n_prompt, num_samples,
                image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, low_threshold, high_threshold):
        """Generate image samples from `input_image` guided by its Canny
        edges and the text prompts.

        NOTE(review): `ddim_steps`, `guess_mode`, `strength`, `scale` and
        `eta` are accepted for interface compatibility but ignored — they
        appear to be baked into the engines / the schedule from __init__.

        Returns:
            A single-element list with the decoder output for sample 0
            (numpy array; exact layout is determined by the decoder engine).
        """
        img = resize_image(HWC3(input_image), image_resolution)
        H, W, _ = img.shape

        # Canny edge map used as the ControlNet conditioning hint.
        # NOTE(review): passed to the hint engine as raw bytes below —
        # presumably the engine (or a plugin) handles uint8 normalization;
        # confirm dtype/size match the engine binding.
        detected_map = self.canny(img, low_threshold, high_threshold)
        control = torch.from_numpy(detected_map).cuda().contiguous()  # e.g. 256x384

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        # Initial latent noise. Keep the tensor itself alive in a local:
        # taking .data_ptr() of an unreferenced temporary (as the original
        # code did) lets PyTorch free/reuse the allocation before the
        # cudaMemcpy below reads from it.
        latent_shape = (1, 4, H // 8, W // 8)
        noise = torch.randn(latent_shape, device="cuda:0").contiguous()

        # step1: CLIP text encoding (conditional + unconditional prompts).
        text = prompt + ", " + a_prompt
        tokens = self.tokenizer([text, n_prompt] * num_samples, truncation=True, max_length=77,
                                return_length=True, return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        input_ids = tokens["input_ids"].cuda().type(torch.int32).contiguous()
        clip_input_data_ptr = self.clip.inputs[0]["tensor"].data_ptr()
        clip_input_data_size = self.clip.inputs[0]["size"]
        cudart.cudaMemcpy(clip_input_data_ptr, input_ids.data_ptr(), clip_input_data_size,
                          cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice)
        self.clip.infer([clip_input_data_ptr])
        clip_output_data_ptr = self.clip.outputs[0]["tensor"].data_ptr()
        clip_output_data_size = self.clip.outputs[0]["size"]

        # step2.1: encode the canny hint once — it is constant across steps.
        guided_hint_input_data_ptr = self.guided_hint.inputs[0]["tensor"].data_ptr()
        guided_hint_input_data_size = self.guided_hint.inputs[0]["size"]
        guided_hint_output_data_ptr = self.guided_hint.outputs[0]["tensor"].data_ptr()
        guided_hint_output_data_size = self.guided_hint.outputs[0]["size"]
        cudart.cudaMemcpy(guided_hint_input_data_ptr, control.data_ptr(), guided_hint_input_data_size,
                          cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice)
        self.guided_hint.infer([guided_hint_input_data_ptr])

        # step2.2: ControlNet + UNet DDIM loop.
        # Binding order assumed from the original code:
        #   inputs:  0=x (latents), 1=guided_hint, 2=timestep, 3=c_crossattn, 4=step index
        #   outputs: 0=next latents
        x_data_ptr = self.control_net.inputs[0]["tensor"].data_ptr()
        x_data_size = self.control_net.inputs[0]["size"]
        guided_hint_data_ptr = self.control_net.inputs[1]["tensor"].data_ptr()
        ts_tensor = self.control_net.inputs[2]["tensor"]
        ts_data_ptr = ts_tensor.data_ptr()
        c_crossattn_data_ptr = self.control_net.inputs[3]["tensor"].data_ptr()
        idx_tensor = self.control_net.inputs[4]["tensor"]
        idx_data_ptr = idx_tensor.data_ptr()
        output_data_ptr = self.control_net.outputs[0]["tensor"].data_ptr()
        output_data_size = self.control_net.outputs[0]["size"]

        cudart.cudaMemcpy(x_data_ptr, noise.data_ptr(), x_data_size,
                          cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice)
        cudart.cudaMemcpy(c_crossattn_data_ptr, clip_output_data_ptr, clip_output_data_size,
                          cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice)
        cudart.cudaMemcpy(guided_hint_data_ptr, guided_hint_output_data_ptr, guided_hint_output_data_size,
                          cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice)

        total_steps = self.ddim_timesteps.shape[0]
        for i, step in enumerate(self.ddim_timesteps):
            # The schedule was reversed in __init__, so step i corresponds
            # to schedule index (total_steps - i - 1).
            idx_tensor[0] = total_steps - i - 1
            ts_tensor[0] = step
            self.control_net.infer([x_data_ptr, guided_hint_data_ptr, ts_data_ptr, c_crossattn_data_ptr, idx_data_ptr])
            # Feed this step's output latents back in as next step's input.
            cudart.cudaMemcpy(x_data_ptr, output_data_ptr, output_data_size,
                              cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice)

        # step3: decode the final latents to image space.
        decoder_input_data_ptr = self.decoder.inputs[0]["tensor"].data_ptr()
        cudart.cudaMemcpy(decoder_input_data_ptr, x_data_ptr, output_data_size,
                          cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice)
        self.decoder.infer([decoder_input_data_ptr])
        x_samples = self.decoder.outputs[0]["tensor"].cpu().numpy()
        return [x_samples[0]]
