import time
try:
    from ais_bench.infer.interface import InferSession, MemorySummary
    from ais_bench.infer.summary import summary
except Exception:
    print()
try:
    import mindspore_lite as mslite
except Exception:
    print()
try:  
    import onnxruntime as ort
except Exception:
    print()
import numpy as np
 

class YoloAclInfer:
    """Inference wrapper for an Ascend .om model via ais_bench InferSession."""

    def __init__(self, device_id=0, model_path="loftr.om"):
        """Load the .om model onto the given Ascend device.

        Bug fix: device_id was previously hard-coded to 0, silently
        ignoring the constructor argument.
        """
        self.device_id = device_id
        self.model_path = model_path
        self.model = InferSession(self.device_id, self.model_path)

    def predict(self, img):
        """Run inference on a single input array and return the first output."""
        return self.model.infer([img])[0]

class YoloMsliteInfer:
    """Inference wrapper for a MindSpore Lite .mindir model on Ascend."""

    def __init__(self, device_id=0, model_path="loftr.mindir"):
        """Build the Lite network on the given Ascend device.

        CPU thread settings act as configuration for the fallback/auxiliary
        CPU context.
        """
        context = mslite.Context()
        context.ascend.device_id = device_id
        context.target = ['Ascend']
        context.cpu.thread_num = 1
        context.cpu.thread_affinity_mode = 2
        self.network = mslite.Model()
        self.network.build_from_file(model_path, mslite.ModelType.MINDIR, context)
        self.inputs = self.network.get_inputs()

    def predict(self, imgs):
        """Run inference on a batched NCHW array and return the first output.

        When the incoming batch size differs from the currently configured
        input, the network is resized first.
        """
        if imgs.shape[0] != self.inputs[0].shape[0]:
            # Generalized: resize to the actual incoming shape instead of the
            # previously hard-coded [N, 3, 640, 640], which corrupted results
            # for any other spatial resolution.
            self.network.resize(self.inputs, [list(imgs.shape)])
        self.inputs[0].set_data_from_numpy(imgs)
        t1 = time.time()
        out = self.network.predict(self.inputs)
        print("infer time:", time.time() - t1)
        return out[0].get_data_to_numpy()

class YoloONNXRuntimeInfer:
    """Inference wrapper for a LoFTR-style ONNX model via onnxruntime.

    The model is expected to expose two image inputs and three outputs
    (matched keypoints for each image plus a confidence score).
    """

    def __init__(self, device_id=0, model_path="loftr.onnx"):
        """Create the ONNX Runtime session, preferring CUDA when available.

        Bug fix: the session previously hard-coded CUDAExecutionProvider
        (failing on CPU-only hosts) and ignored device_id; now the provider
        list is chosen from the actually available providers.
        """
        # Session-level options: quiet logging, basic graph optimizations,
        # and a small intra-op thread pool.
        session_options = ort.SessionOptions()
        session_options.log_severity_level = 3
        session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
        session_options.intra_op_num_threads = 4

        available_providers = ort.get_available_providers()
        device = ort.get_device()
        print(f"Current device: {device}")
        if 'CUDAExecutionProvider' in available_providers:
            providers = ['CUDAExecutionProvider']
            provider_options = [{'device_id': device_id}]
        else:
            providers = ['CPUExecutionProvider']
            provider_options = None
        self.ort_session = ort.InferenceSession(
            model_path,
            sess_options=session_options,
            providers=providers,
            provider_options=provider_options,
        )

        # Cache input/output tensor names for predict().
        self.img0_name = self.ort_session.get_inputs()[0].name
        self.img1_name = self.ort_session.get_inputs()[1].name
        print("输入名:", self.img0_name)
        print("输入名:", self.img1_name)
        print("输入形状:", self.ort_session.get_inputs()[0].shape)
        self.mkpts0_f = self.ort_session.get_outputs()[0].name
        self.mkpts1_f = self.ort_session.get_outputs()[1].name
        self.mconf = self.ort_session.get_outputs()[2].name

    def predict(self, last_data):
        """Run matching on a dict carrying 'image0' and 'image1' arrays.

        Returns the (mkpts0_f, mkpts1_f, mconf) outputs as a tuple.
        """
        mkpts0_f, mkpts1_f, mconf = self.ort_session.run(
            output_names=[self.mkpts0_f, self.mkpts1_f, self.mconf],
            input_feed={self.img0_name: last_data['image0'],
                        self.img1_name: last_data['image1']},
        )
        return mkpts0_f, mkpts1_f, mconf