# encoding=utf-8
import torch
import onnxruntime
from Common.AppConfig import AppConfig
from ThirdPart.Segmentation.model import Unet
from onnxruntime.quantization import QuantType
from onnxruntime.quantization.quantize import quantize_dynamic


def builder():
    """Build the segmentation model and move it to the best available device.

    Loads the 3-channel-in / 6-class-out Unet weights from
    ``AppConfig.RES_SEG_UNET`` and puts the model into eval mode.

    Returns:
        tuple[torch.nn.Module, torch.device]: the ready-to-infer model and
        the device it lives on.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Unet(3, 6)
    # map_location ensures a GPU-saved checkpoint still loads on a
    # CPU-only machine instead of raising a CUDA deserialization error.
    model_dict = torch.load(AppConfig.RES_SEG_UNET, map_location=device)
    model.load_state_dict(model_dict)
    model.eval()
    model = model.to(device)
    return model, device


def output(model, device):
    """Export *model* to ONNX and sanity-check it with an ORT session.

    Traces the model with a random 1x3x512x512 input (the shape the
    segmentation pipeline presumably uses — confirm against callers),
    writes ``unet-output.onnx``, then opens it with onnxruntime and
    prints the graph's input/output names and shapes.

    Args:
        model: the eval-mode torch module to export.
        device: device the dummy input must live on (must match the model).

    Returns:
        str: path of the exported ONNX file.
    """
    rand_input = torch.randn([1, 3, 512, 512]).to(device)

    onnx_path = "unet-output.onnx"
    with open(onnx_path, 'wb') as f:
        torch.onnx.export(
            model,
            rand_input,
            f,
            export_params=True,
            verbose=False
        )

    # BUG FIX: the original passed the provider names via `provider_options=`,
    # which is the wrong keyword (that argument takes per-provider config
    # dicts). Provider names go in `providers=`; newer onnxruntime builds
    # reject a session created without it. Filter against the providers
    # actually compiled into this install so a CPU-only wheel doesn't raise.
    available = onnxruntime.get_available_providers()
    providers = [
        p for p in ("CUDAExecutionProvider", "CPUExecutionProvider")
        if p in available
    ]
    onnx_model = onnxruntime.InferenceSession(onnx_path, providers=providers)

    for inp in onnx_model.get_inputs():
        print(inp.name, inp.shape)

    for out in onnx_model.get_outputs():
        print(out.name, out.shape)
    return onnx_path


def quant(onnx_path):
    """Dynamically quantize the ONNX model to unsigned 8-bit weights.

    Writes the quantized graph next to the working directory as
    ``unet-quant.onnx``.

    Args:
        onnx_path: path of the float ONNX model produced by :func:`output`.

    Returns:
        str: path of the quantized model (returned for consistency with
        :func:`output`; the original returned ``None``, so callers that
        ignore it are unaffected).
    """
    onnx_output = "unet-quant.onnx"
    quantize_dynamic(
        model_input=onnx_path,
        model_output=onnx_output,
        per_channel=False,
        reduce_range=False,
        weight_type=QuantType.QUInt8,
    )
    return onnx_output


if __name__ == '__main__':
    # Pipeline: load weights -> export ONNX -> dynamic quantization.
    model, device = builder()
    exported = output(model, device)
    quant(exported)
    # Inspect the result with netron:
    #   pip install netron
    #   netron unet-quant.onnx