#!/usr/bin/env python  
# -*- coding:utf-8 _*-
"""
@file: export_onnx.py 
@time: 2023/07/14
@author: xingwg 
@contact: xwg031459@163.com
@software: PyCharm 
"""
import os
import onnx
import onnx_graphsurgeon as gs
import einops
import numpy as np
import torch
from torch import einsum
from cldm.model import create_model, load_state_dict
from cldm.hack import disable_verbosity
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters,\
    make_ddim_timesteps, make_beta_schedule, noise_like


disable_verbosity()

# Directory that receives every exported .onnx file.
save_dir = "onnx"
# exist_ok avoids the racy exists()-then-makedirs() pattern.
os.makedirs(save_dir, exist_ok=True)

# Build the ControlNet graph from its YAML config and load the
# canny-conditioned SD 1.5 checkpoint. Everything stays on CPU because the
# ONNX export below traces with CPU tensors.
model = create_model('./models/cldm_v15.yaml').cpu()
model.load_state_dict(load_state_dict('/home/player/ControlNet/models/control_sd15_canny.pth', location='cpu'))
model.cpu()


class CLIPNet(torch.nn.Module):
    """Wraps the frozen CLIP text transformer for ONNX export.

    Takes a batch of token-id sequences and returns the transformer's
    last hidden state (the text embedding consumed by the UNet).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Borrow the already-loaded CLIP components from the global model.
        self.tokenizer = model.cond_stage_model.tokenizer
        self.transformer = model.cond_stage_model.transformer

    def forward(self, x):
        # Positional arguments mirror the transformer's signature; only the
        # token ids are supplied, all optional slots stay None/False.
        output = self.transformer(x, None, None, None, False, None)
        return output.last_hidden_state


# Export the CLIP text encoder. Input: two padded token sequences of
# length 77 (conditional + unconditional prompt), int32.
clip = CLIPNet()
x_ = torch.ones((2, 77), dtype=torch.int32).cpu()
# Use os.path.join(save_dir, ...) like every other export in this file
# instead of a hard-coded "onnx/clip.onnx" path.
torch.onnx.export(clip, x_, os.path.join(save_dir, "clip.onnx"), opset_version=18)


class GuidedHintNet(torch.nn.Module):
    """Wraps the ControlNet input-hint encoder for ONNX export.

    Accepts a single-channel uint8 hint image, rescales it to [0, 1],
    replicates it to three channels, and runs the hint block.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.net = model.control_model.input_hint_block

    def forward(self, x):
        normalized = x.float() / 255.0
        # Grayscale -> 3 channels, e.g. 1x3x256x384.
        rgb = torch.cat((normalized, normalized, normalized), dim=1)
        # The hint block ignores its timestep/context slots, hence the Nones.
        return self.net(rgb, None, None)


class ControlNet(torch.nn.Module):
    """Fused ControlNet + diffusion UNet + one DDIM update, for ONNX export.

    Bundles the control network, the diffusion UNet and precomputed DDIM
    sampler coefficients into a single module so that one forward() call
    performs a complete denoising step: guided epsilon prediction followed
    by the DDIM x_{t-1} update.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Borrow the already-loaded networks from the global model.
        self.control_model = model.control_model
        self.diffusion_model = model.model.diffusion_model
        self.ddpm_num_timesteps = 1000
        self.schedule = "linear"
        # DDIM tables; populated by make_schedule() at the end of __init__.
        self.ddim_timesteps = None
        self.ddim_sigmas = None
        self.ddim_alphas = None
        self.ddim_alphas_prev = None
        self.ddim_sqrt_one_minus_alphas = None
        self.ddim_sigmas_for_original_num_steps = None
        # Full 1000-step DDPM noise schedule (linear betas).
        betas = make_beta_schedule(
            schedule="linear", n_timestep=1000, linear_start=0.00085, linear_end=0.012, cosine_s=0.008)
        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0, dtype=np.float32)
        # alpha_bar_{t-1}, shifted right with alpha_bar_{-1} := 1.
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]).astype(np.float32)

        self.betas = torch.from_numpy(betas.astype(np.float32)).cpu()
        self.alphas_cumprod = torch.from_numpy(alphas_cumprod).cpu()
        self.alphas_cumprod_prev = torch.from_numpy(alphas_cumprod_prev).cpu()

        # Derived quantities used by the diffusion math.
        # NOTE(review): this line lacks the trailing .cpu() its siblings have —
        # harmless, since tensors created from numpy start on CPU anyway.
        self.sqrt_alphas_cumprod = torch.from_numpy(np.sqrt(alphas_cumprod).astype(np.float32))
        self.sqrt_one_minus_alphas_cumprod = torch.from_numpy(np.sqrt(1. - alphas_cumprod).astype(np.float32)).cpu()
        self.log_one_minus_alphas_cumprod = torch.from_numpy(np.log(1. - alphas_cumprod).astype(np.float32)).cpu()
        self.sqrt_recip_alphas_cumprod = torch.from_numpy(np.sqrt(1. / alphas_cumprod).astype(np.float32)).cpu()
        self.sqrt_recipm1_alphas_cumprod = torch.from_numpy(np.sqrt(1. / alphas_cumprod - 1).astype(np.float32)).cpu()

        # 20-step deterministic DDIM schedule (eta=0 -> no stochastic noise).
        self.make_schedule(ddim_num_steps=20, ddim_eta=0.0, verbose=False)

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        """Precompute the per-step DDIM coefficient tables.

        ddim_num_steps: number of DDIM sampling steps to subsample from the
            1000 DDPM timesteps.
        ddim_discretize: timestep selection strategy passed to the helper.
        ddim_eta: stochasticity; 0 gives the deterministic DDIM sampler.
        verbose: forwarded to the schedule helpers for logging.
        """
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, "alphas have to be defined for each timestep"

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(
            alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose)

        self.ddim_sigmas = ddim_sigmas.cpu()
        self.ddim_alphas = ddim_alphas.cpu()
        self.ddim_alphas_prev = torch.from_numpy(ddim_alphas_prev).cpu()
        self.ddim_sqrt_one_minus_alphas = torch.sqrt(1. - ddim_alphas).cpu()
        self.ddim_sigmas_for_original_num_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (1 - self.alphas_cumprod / self.alphas_cumprod_prev))

        # Reverse so index 0 is the largest timestep: sampling runs backwards
        # and forward() indexes these tables directly with `index`.
        self.ddim_timesteps = np.flip(self.ddim_timesteps)

    def forward(self, x, c_concat, ts, c_crossattn, index):
        """Run one guided DDIM denoising step.

        x: noisy latent (traced with shape 1x4x32x48 — see export call below).
        c_concat: guided hint features fed to the control model.
        ts: current timestep tensor.
        c_crossattn: text-conditioning context (cond + uncond rows —
            presumably batched inside the wrapped models; TODO confirm).
        index: scalar index tensor into the flipped DDIM tables.
        Returns the denoised latent x_{t-1}.
        """
        # xs = torch.cat((x, x), dim=0)
        control = self.control_model(x, c_concat, ts, c_crossattn)
        y = self.diffusion_model(x, ts, c_crossattn, control, False)
        # Split the batched prediction into conditional / unconditional halves
        # and combine them with a hard-coded guidance scale of 9.
        model_t = y[0:1, :, :, :]
        model_uncond = y[1:, :, :, :]
        e_t = model_uncond + 9. * (model_t - model_uncond)

        # Look up the DDIM coefficients for this step.
        a_t = self.ddim_alphas[index[0]]
        a_prev = self.ddim_alphas_prev[index[0]]
        sqrt_one_minus_at = self.ddim_sqrt_one_minus_alphas[index[0]]
        # DDIM update: predict x0, then step to the previous latent.
        pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
        dir_xt = (1. - a_prev).sqrt() * e_t
        x = a_prev.sqrt() * pred_x0 + dir_xt
        return x


# Trace-time dummy inputs (shapes match the 256x384 canny pipeline:
# latents are 32x48, text context is 2x77x768 for cond + uncond).
x_noisy = torch.randn((1, 4, 32, 48), dtype=torch.float32).cpu()
hint = torch.zeros((1, 1, 256, 384), dtype=torch.uint8).cpu()
guided_hint = torch.randn((1, 320, 32, 48), dtype=torch.float32).cpu()
t = torch.ones([1], dtype=torch.int32).cpu()
context = torch.randn((2, 77, 768), dtype=torch.float32).cpu()
idx = torch.ones([1], dtype=torch.int32).cpu()

# Instantiate both wrappers in eval mode before tracing.
controlnet = ControlNet()
controlnet.eval()
controlnet0 = GuidedHintNet()
controlnet0.eval()

# Export the hint encoder and the fused ControlNet/UNet/DDIM step.
torch.onnx.export(
    controlnet0, hint,
    os.path.join(save_dir, "hint.onnx"), opset_version=18)
torch.onnx.export(
    controlnet, (x_noisy, guided_hint, t, context, idx),
    os.path.join(save_dir, "controlnet.onnx"), opset_version=18)


class Decoder(torch.nn.Module):
    """Wraps the VAE decoder for ONNX export.

    Maps a latent tensor to a uint8 image in NHWC order: un-scales the
    latent, decodes it, and converts the [-1, 1] output to [0, 255] pixels.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.post_quant_conv = model.first_stage_model.post_quant_conv
        self.decoder = model.first_stage_model.decoder

    def forward(self, x):
        # Undo the SD latent scale factor (exact expression kept for
        # bit-identical floating-point results).
        latent = 1. / 0.18215 * x
        decoded = self.decoder(self.post_quant_conv(latent))
        image = decoded.permute(0, 2, 3, 1)  # NCHW -> NHWC
        image = image * 127.5 + 127.5        # [-1, 1] -> [0, 255]
        return image.clip(0, 255).type(torch.uint8)


# Export the latent decoder; input is a 1x4x32x48 latent sample.
decoder = Decoder()
decoder.eval()
z = torch.randn((1, 4, 32, 48), dtype=torch.float32).cpu()
torch.onnx.export(
    decoder, z, os.path.join(save_dir, "decoder.onnx"), opset_version=18)
