import jetson.inference
import jetson.utils
import cv2

def recognition_init(model_path, label_path, width, height, input_dev='/dev/video0'):
    """Load the classification network and open the video input/output streams.

    Parameters
    ----------
    model_path : str
        Path to the custom ONNX classification model.
    label_path : str
        Path to the class-labels text file.
    width, height : str or int
        Capture resolution; converted with str() so ints work too.
    input_dev : str
        V4L2 device or stream URI (default '/dev/video0').

    Returns
    -------
    tuple
        (net, video_in, video_out) jetson objects: the imageNet classifier,
        the videoSource, and the videoOutput.
    """
    # Build an argv-style option list understood by jetson.inference /
    # jetson.utils; str() lets callers pass width/height as ints as well.
    argv = [
        'my_imagenet.py',
        '--model=' + model_path,
        '--labels=' + label_path,
        '--input_blob=input_0',
        '--output_blob=output_0',
        input_dev,
        '--width=' + str(width),
        '--height=' + str(height),
    ]
    # load the recognition network (the --model option overrides the
    # 'resnet18' built-in with the custom ONNX model)
    net = jetson.inference.imageNet('resnet18', argv)
    # create video source & output; pass input_dev directly instead of the
    # brittle positional argv[5] lookup
    video_in = jetson.utils.videoSource(input_dev, argv=argv)
    video_out = jetson.utils.videoOutput('', argv=argv + [''])
    return net, video_in, video_out

def result_get(net, input, output):
    """Capture one frame, classify it, and return it as a BGR numpy image.

    Parameters
    ----------
    net : jetson.inference.imageNet
        The loaded classification network.
    input : jetson.utils.videoSource
        Video source to capture the next frame from.
    output : jetson.utils.videoOutput
        Currently unused; kept so the signature stays compatible with callers.

    Returns
    -------
    tuple
        (cv_img, confidence, class_desc) — a BGR numpy array suitable for
        OpenCV, the classification confidence, and the predicted class's
        label text.
    """
    # capture the next image (CUDA-mapped RGB frame)
    rgb_img = input.Capture()
    # classify the image
    class_id, confidence = net.Classify(rgb_img)
    # find the object description for the predicted class id
    class_desc = net.GetClassDesc(class_id)

    # convert to BGR, since that's what OpenCV expects.
    # NOTE(review): a fresh CUDA buffer is allocated on every call; if this
    # runs in a tight loop, consider caching the buffer across frames.
    bgr_img = jetson.utils.cudaAllocMapped(width=rgb_img.width, height=rgb_img.height, format='bgr8')
    jetson.utils.cudaConvertColor(rgb_img, bgr_img)
    # make sure the GPU is done working before we hand the buffer to cv2
    jetson.utils.cudaDeviceSynchronize()

    # convert to cv2 image (cv2 images are numpy arrays)
    cv_img = jetson.utils.cudaToNumpy(bgr_img)
    return cv_img, confidence, class_desc


if __name__ == '__main__':
    # Paths to the custom-trained garbage-classification model and its labels.
    model_path = '/home/kx/jetson-inference/python/training/classification/models/garbage/resnet18.onnx'
    label_path = '/home/kx/jetson-inference/python/training/classification/data/garbage/labels.txt'
    # 'video_in' / 'video_out' avoid shadowing the builtin input().
    net, video_in, video_out = recognition_init(model_path, label_path, '640', '360')
    try:
        # Display classified frames until the user presses ESC.
        while True:
            cv_img, confidence, class_desc = result_get(net, video_in, video_out)
            cv2.imshow('cv_img', cv_img)
            # Mask to 8 bits: on some platforms waitKey() sets high bits,
            # so an unmasked comparison against 27 (ESC) never matches.
            if cv2.waitKey(1) & 0xFF == 27:
                break
    finally:
        # Tear down the HighGUI window even if the loop raises.
        cv2.destroyAllWindows()