# Numeric identifiers for the available inference backends.
# NOTE(review): these constants are not referenced anywhere in this file —
# presumably consumed by a factory/dispatcher elsewhere that maps each mode
# to the corresponding Engine subclass below; confirm against callers.
MODE_TF = 0      # TFEngine (frozen TensorFlow graph)
MODE_TFLITE = 1  # TFLiteEngine (TensorFlow Lite)
MODE_ORT = 2     # ORTEngine (ONNX Runtime)
MODE_DNN = 3     # DNNEngine (OpenCV dnn module)
MODE_MNN = 4     # MNNEngine
MODE_RKNN = 5    # RKNNEngine


class Engine(object):
    """Base class for inference engines.

    Provides optional fixed-size batching on top of the backend-specific
    ``invoke_b``: when ``batch_size`` is set, inputs are split into chunks
    of that size, the last chunk is zero-padded up to ``batch_size`` (for
    backends with static input shapes), and the rows corresponding to the
    padding are stripped from the outputs before chunks are concatenated.
    """

    def __init__(self, batch_size=None):
        # batch_size is None -> pass inputs straight through to invoke_b;
        # otherwise run inference in fixed-size chunks.
        self.batch_size = batch_size

    def invoke(self, inputs, **kwargs):
        """Run inference on a full set of samples.

        Parameters
        ----------
        inputs : list of numpy arrays that all share the same leading
            (sample-count) dimension.

        Returns
        -------
        list of numpy arrays, one per model output, with the same leading
        dimension as the inputs.
        """
        import numpy as np

        if self.batch_size is None:
            return self.invoke_b(inputs, **kwargs)

        n, b = inputs[0].shape[0], self.batch_size
        # max(1, ...): with n == 0 we still run one all-padding chunk so the
        # number and trailing shape of the outputs can be determined.
        # (Previously zero samples made the loop run zero times and the final
        # transpose crashed with an IndexError on outputs[0].)
        num_chunks = max(1, (n + b - 1) // b)
        chunk_outputs = []
        for i in range(num_chunks):
            inputs_b = [x[i * b:(i + 1) * b] for x in inputs]
            rb = inputs_b[0].shape[0]  # real (unpadded) rows in this chunk
            if rb < b:
                # Zero-pad the final chunk up to the fixed batch size.
                inputs_b = [np.concatenate([x, np.zeros((b - rb, *x.shape[1:]), dtype=x.dtype)], axis=0)
                            for x in inputs_b]
            outputs_b = self.invoke_b(inputs_b, **kwargs)
            # Drop the rows that correspond to padding.
            chunk_outputs.append([x[:rb] for x in outputs_b])

        # Transpose list-of-chunks into list-of-outputs and concatenate.
        return [np.concatenate([chunk[i] for chunk in chunk_outputs], axis=0)
                for i in range(len(chunk_outputs[0]))]

    def invoke_b(self, inputs, **kwargs):
        """Run one (possibly padded) batch; implemented by subclasses."""
        raise NotImplementedError


class TFEngine(Engine):
    """Engine backed by a frozen TensorFlow graph, run via a tf.compat.v1 Session."""

    def __init__(self, model_path, input_names, output_names, batch_size=None,
                 allow_growth=True, per_process_gpu_memory_fraction=None, **kwargs):
        super(TFEngine, self).__init__(batch_size=batch_size)

        import tensorflow as tf

        # Deserialize the frozen GraphDef into a fresh graph.
        graph = tf.Graph()
        with graph.as_default():
            graph_def = tf.compat.v1.GraphDef()
            with tf.io.gfile.GFile(model_path, 'rb') as fp:
                graph_def.ParseFromString(fp.read())
            tf.import_graph_def(graph_def, name='')

        # Resolve the caller-specified tensor names once, up front.
        self.input_tensors = [graph.get_tensor_by_name(name) for name in input_names]
        self.output_tensors = [graph.get_tensor_by_name(name) for name in output_names]

        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = allow_growth
        if per_process_gpu_memory_fraction is not None:
            config.gpu_options.per_process_gpu_memory_fraction = per_process_gpu_memory_fraction

        self.session = tf.compat.v1.Session(graph=graph, config=config)

    def invoke_b(self, inputs, **kwargs):
        """Feed one batch through the session and fetch all output tensors."""
        feed = dict(zip(self.input_tensors, inputs))
        return self.session.run(self.output_tensors, feed_dict=feed)


class TFLiteEngine(Engine):
    """Engine backed by a TensorFlow Lite interpreter."""

    def __init__(self, model_path, batch_size=None, **kwargs):
        super(TFLiteEngine, self).__init__(batch_size=batch_size)

        import tensorflow as tf

        self.interpreter = tf.lite.Interpreter(model_path)
        self.interpreter.allocate_tensors()

        # Cache tensor metadata; the 'index' entries address the tensors.
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

    def invoke_b(self, inputs, **kwargs):
        """Copy one batch into the interpreter, run it, and read back every output."""
        for detail, tensor in zip(self.input_details, inputs):
            self.interpreter.set_tensor(detail['index'], tensor)

        self.interpreter.invoke()

        return [self.interpreter.get_tensor(detail['index'])
                for detail in self.output_details]


class ORTEngine(Engine):
    """Engine backed by ONNX Runtime."""

    def __init__(self, model_path, batch_size=None, sess_options=None, providers=None, **kwargs):
        super(ORTEngine, self).__init__(batch_size=batch_size)

        import onnxruntime

        # None sentinel instead of a mutable default argument; mapped back to
        # [] so the InferenceSession call behaves exactly as before.
        if providers is None:
            providers = []

        # NOTE: self.batch_size is already set by super().__init__; the
        # duplicate assignment that used to follow here was removed.
        self.session = onnxruntime.InferenceSession(model_path, sess_options=sess_options, providers=providers)
        # Input/output names come from the model itself, in session order.
        self.input_names = [x.name for x in self.session.get_inputs()]
        self.output_names = [x.name for x in self.session.get_outputs()]

    def invoke_b(self, inputs, run_options=None, **kwargs):
        """Run one batch; returns outputs in self.output_names order."""
        outputs = self.session.run(self.output_names,
                                   dict(zip(self.input_names, inputs)),
                                   run_options=run_options)
        return outputs


class DNNEngine(Engine):
    """Engine backed by OpenCV's dnn module (ONNX or Caffe models)."""

    def __init__(self, model_path, framework='onnx', batch_size=None, input_names=None, output_names=None,
                 backendId=None, targetId=None, **kwargs):
        super(DNNEngine, self).__init__(batch_size=batch_size)

        import cv2

        # When input_names/output_names are None, the network's default
        # input/output is used (see invoke_b).
        self.input_names = input_names
        self.output_names = output_names

        self.net = self._read_net(model_path, framework)
        self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV if backendId is None else backendId)
        self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU if targetId is None else targetId)

    def _read_net(self, model_path, framework):
        """Load the network. model_path is a single path for 'onnx' and an
        iterable of paths (unpacked into readNetFromCaffe) for 'caffe'.

        Raises
        ------
        ValueError
            If framework is not one of the supported values.
        """
        import cv2

        if framework == 'onnx':
            return cv2.dnn.readNetFromONNX(model_path)
        elif framework == 'caffe':
            return cv2.dnn.readNetFromCaffe(*model_path)
        # Previously this printed a message and called exit(0), which reports
        # SUCCESS to the OS and hides the misconfiguration; raise instead.
        raise ValueError('unsupported framework: {}'.format(framework))

    def invoke_b(self, inputs, **kwargs):
        """Run one batch. Uses named inputs/outputs when configured, otherwise
        the network's defaults (single input, single output)."""
        if self.input_names is None:
            self.net.setInput(inputs[0])
        else:
            for name, blob in zip(self.input_names, inputs):
                self.net.setInput(blob, name)

        if self.output_names is None:
            return [self.net.forward()]
        return self.net.forward(self.output_names)


class MNNEngine(Engine):
    """Engine backed by MNN.

    NOTE(review): only a single input tensor is supported — invoke_b copies
    just the first input into the session.
    """

    def __init__(self, model_path, output_names, tensor_type, batch_size=None, **kwargs):
        super(MNNEngine, self).__init__(batch_size=batch_size)

        import MNN
        self.interpreter = MNN.Interpreter(model_path)
        self.session = self.interpreter.createSession()
        self.output_names = output_names
        # Must be 'Tensor_DimensionType_Tensorflow' or 'Tensor_DimensionType_Caffe';
        # selects the MNN dimension-type constant passed to MNN.Tensor.
        self.tensor_type = tensor_type

    def invoke_b(self, inputs, **kwargs):
        """Run one batch and return the data of each named output tensor.

        Raises
        ------
        ValueError
            If self.tensor_type is not one of the supported values.
        """
        import MNN

        # The base-class invoke() passes a LIST of arrays, but the original
        # code accessed inputs.shape directly and only worked with a bare
        # array; accept both for backward compatibility.
        data = inputs[0] if isinstance(inputs, (list, tuple)) else inputs

        if self.tensor_type == 'Tensor_DimensionType_Tensorflow':
            dim_type = MNN.Tensor_DimensionType_Tensorflow
        elif self.tensor_type == 'Tensor_DimensionType_Caffe':
            dim_type = MNN.Tensor_DimensionType_Caffe
        else:
            # Previously an unknown tensor_type fell through and crashed later
            # with an opaque AttributeError on the missing temporary tensor.
            raise ValueError('unsupported tensor_type: {}'.format(self.tensor_type))

        input_tensor = self.interpreter.getSessionInput(self.session)
        tmp_input = MNN.Tensor(data.shape, MNN.Halide_Type_Float, data, dim_type)
        input_tensor.copyFrom(tmp_input)
        self.interpreter.runSession(self.session)

        outputs = []
        for name in self.output_names:
            out_tensor = self.interpreter.getSessionOutput(self.session, name)
            outputs.append(out_tensor.getData())
        return outputs


class RKNNEngine(Engine):
    """Engine backed by an RKNN model running on the RKNN runtime."""

    def __init__(self, model_path, batch_size=None, **kwargs):
        super(RKNNEngine, self).__init__(batch_size=batch_size)

        from rknn.api import RKNN

        self.model = RKNN(verbose=False)

        # Both setup calls return a nonzero status code on failure; report it
        # and exit with that code.
        status = self.model.load_rknn(model_path)
        if status != 0:
            print('Load model failed')
            exit(status)

        status = self.model.init_runtime()
        if status != 0:
            print('Init runtime environment failed')
            exit(status)

    def invoke_b(self, inputs, **kwargs):
        """Run one batch through the RKNN runtime."""
        return self.model.inference(inputs=inputs)
