# -*-coding: utf-8 -*-

from icecream import ic as print
from time import time
import cv2
import numpy as np
import onnxruntime
import os
import sys
from imgaug import augmenters as iaa
sys.path.append(os.getcwd())


class ONNXModel():
    """Thin wrapper around an ONNX Runtime session for a segmentation model.

    Loads the model with CUDA preferred (CPU fallback), records the input and
    output tensor names, and immediately runs one demo inference on
    construction (preserving the original script's behaviour).
    """

    def __init__(self, onnx_path):
        """
        :param onnx_path: path to the .onnx model file.
        """
        # Prefer the CUDA provider when available, else fall back to CPU.
        self.onnx_session = onnxruntime.InferenceSession(
            onnx_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
        self.input_name = self.get_input_name(self.onnx_session)
        self.output_name = self.get_output_name(self.onnx_session)
        # Original code passed a dummy 1; forward_ now treats any
        # non-ndarray argument as "preprocess the default demo image".
        self.forward_(1)

    def get_output_name(self, onnx_session):
        """Return the names of all output nodes of the session.

        :param onnx_session: an onnxruntime.InferenceSession.
        :return: list of output tensor names.
        """
        return [node.name for node in onnx_session.get_outputs()]

    def get_input_name(self, onnx_session):
        """Return the names of all input nodes of the session.

        :param onnx_session: an onnxruntime.InferenceSession.
        :return: list of input tensor names.
        """
        return [node.name for node in onnx_session.get_inputs()]

    def get_input_feed(self, input_name, image_tensor):
        """Build the feed dict mapping every input name to the same tensor.

        :param input_name: iterable of input tensor names.
        :param image_tensor: tensor to feed to each input.
        :return: dict {name: image_tensor}.
        """
        return {name: image_tensor for name in input_name}

    def prepress(self, image_path="/home/u20/code/bisenet/demo.jpg"):
        """Load an image and preprocess it into a (1, 3, 720, 960) blob.

        Pipeline: BGR->RGB, resize to 960x720, scale to [0, 1], normalize
        with ImageNet mean/std, HWC->CHW, prepend a batch dimension.

        :param image_path: image file to load (defaults to the original
            hard-coded demo path).
        :return: np.float32 array of shape (1, 3, 720, 960).
        """
        img = cv2.imread(image_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image = cv2.resize(img, (960, 720)) / 255.0
        # ImageNet normalization constants; broadcasting replaces the
        # original per-channel loop.
        mean = (0.485, 0.456, 0.406)
        std = (0.229, 0.224, 0.225)
        image = (image - mean) / std
        return np.expand_dims(np.transpose(image, (2, 0, 1)), axis=0).astype(np.float32)

    def forward_(self, image_tensor=None):
        """Run one inference and display the argmax segmentation map.

        :param image_tensor: preprocessed (1, 3, 720, 960) float32 blob.
            Anything that is not an ndarray (e.g. the dummy 1 passed by
            __init__, or None) is ignored and the default demo image is
            preprocessed instead.
        """
        # Local import to avoid shadowing the module-level
        # `from time import time` name (the original re-imported the module).
        from time import perf_counter

        if not isinstance(image_tensor, np.ndarray):
            image_tensor = self.prepress()
        print(image_tensor.shape)
        t1 = perf_counter()
        # "input" must match the model's input tensor name.
        outs = self.onnx_session.run(None, {"input": image_tensor})[0][0]
        print(perf_counter() - t1)
        print(outs.shape)

        # Per-pixel class id -> visible gray level (x80) for display.
        seg = (np.argmax(outs, axis=0) * 80).astype(np.uint8)
        cv2.imshow("llll", seg)
        cv2.waitKey()

if __name__ == "__main__":
    # Guarded so importing this module does not load the model and pop a
    # GUI window; instantiation runs the demo inference via __init__.
    net = ONNXModel('model.onnx')