import onnx
import numpy as np
import onnxruntime as ort


# Sample: run a custom DequantizeLinear op (domain 'ivot_cvai') with CUDA.
#
# Builds a one-node ONNX model using the custom DequantizeLinear kernel,
# saves it to disk, then executes it through onnxruntime with the custom-op
# shared library registered on the CUDA execution provider.
print('dequan custom op sample with CUDA')

# Load custom op shared library into the session options.
option = ort.SessionOptions()
option.register_custom_ops_library('dequan_with_cuda/build/libcustom_op_dequan.so')
# Execution providers to use (swap in the CPU line to run without a GPU).
providers = ['CUDAExecutionProvider']
# providers = ['CPUExecutionProvider']

# Opset imports: the default ONNX domain (v17) PLUS the custom domain used
# by the node below. Every domain referenced by a node must be declared in
# opset_imports, otherwise onnxruntime rejects the model at load time.
opsets = [
    onnx.helper.make_opsetid('', 17),
    onnx.helper.make_opsetid('ivot_cvai', 1),
]

# ONNX model creation: a single custom DequantizeLinear node with a per-channel
# scale/zeropoint of length 64 (presumably dequantizing along axis 0 — the
# exact semantics live in the custom CUDA kernel; verify there).
inputs = [
    onnx.helper.make_tensor_value_info('input', onnx.TensorProto.INT16, [64, 3, 7, 7]),
    onnx.helper.make_tensor_value_info('scale', onnx.TensorProto.FLOAT, [64]),
    onnx.helper.make_tensor_value_info('zeropoint', onnx.TensorProto.INT16, [64]),
]
outputs = [onnx.helper.make_tensor_value_info('out', onnx.TensorProto.FLOAT, [64, 3, 7, 7])]
nodes = [onnx.helper.make_node('DequantizeLinear', ['input', 'scale', 'zeropoint'], ['out'], "dequan0000", domain='ivot_cvai')]
graph = onnx.helper.make_graph(nodes, 'dequan', inputs, outputs)
model = onnx.helper.make_model(graph, opset_imports=opsets)
onnx.save(model, "dequan_with_cuda/dequan.onnx")
print("save to dequan_with_cuda/dequan.onnx")

# Create session object with the custom op registered, on CUDAExecutionProvider.
sess = ort.InferenceSession(model.SerializeToString(), option, providers)

# Input data ('input_data' rather than 'input' to avoid shadowing the builtin).
input_data = np.ones([64, 3, 7, 7], dtype=np.int16)
scale = np.ones([64], dtype=np.float32)
zp = np.ones([64], dtype=np.int16)

# Inference
results = sess.run(None, {'input': input_data, 'scale': scale, 'zeropoint': zp})

# print result
# print(f'results -> {results[0]}')


