# -*- coding=utf-8 -*-
import hiai
from hiai.nn_tensor_lib import DataType
from utils.utils import CreateGraphWithoutDVPP, GraphInference
from utils import jpegHandler
from utils.postprocess import SSD_PostProcess_with_plt
from utils.utils import coco_index_to_class
import os
import numpy as np
import time
import argparse


def get_args():
    """Parse command-line options for the offline SSD inference demo.

    Returns:
        argparse.Namespace: parsed options (model path, input image size,
        source/result directories, dataset flavour, retest flag).
    """

    def _str2bool(value):
        # argparse's ``type=bool`` is a well-known trap: any non-empty
        # string is truthy, so ``--retest False`` would still yield True.
        # Parse the common textual spellings explicitly instead.
        if isinstance(value, bool):
            return value
        lowered = value.strip().lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(
            'boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser(description='SSD test script')
    parser.add_argument('--kCategoryIndex',
                        default=2,
                        help='number to correct class idx', type=int)
    parser.add_argument('--dvppInWidth',
                        default=512,
                        help='input image width, 128*int ', type=int)
    parser.add_argument('--dvppInHeight',
                        default=512,
                        help='input image height, 16*int', type=int)
    parser.add_argument('--omFileName',
                        default='./models/vgg_ssd.om',
                        help='offline model path')
    parser.add_argument('--srcFileDir',
                        default='./ImageNetRaw/',
                        help='img to detect')
    parser.add_argument('-d', '--dataset', default='VOC',
                        help='VOC or COCO version')
    parser.add_argument('--save_folder', default='eval/', type=str,
                        help='Dir to save results')
    parser.add_argument('--dstFileDir', default='./Result/vgg_ssd', type=str,
                        help='Dir to save image results')
    parser.add_argument('--retest', default=True, type=_str2bool,
                        help='test cache results')
    args = parser.parse_args()

    return args


def main():
    """Run SSD offline-model inference over every image in args.srcFileDir.

    Loads the .om model, builds an inference graph without the DVPP
    pre-processing stage, feeds each resized/YUV-converted image through
    the graph and hands the raw output to the SSD post-processing step,
    which writes annotated results under args.dstFileDir.

    Relies on the module-level ``args`` namespace set in the ``__main__``
    guard; returns None on graph-creation failure.
    """
    inferenceModel = hiai.AIModelDescription('faster-rcnn', args.omFileName)
    print('offline model path ----> {}'.format(args.omFileName))
    # Images are resized on the CPU (via opencv in jpegHandler) to the
    # model's required size, so DVPP hardware resizing is not needed.
    myGraph = CreateGraphWithoutDVPP(inferenceModel)
    if myGraph is None:
        print("CreateGraph failed")
        return None

    dvppInWidth = args.dvppInWidth
    dvppInHeight = args.dvppInHeight

    start = time.time()
    print('------------------->start<-------------------')
    pathDir = sorted(os.listdir(args.srcFileDir))
    for idx, img_name in enumerate(pathDir):
        # Real join (the original '%s%s' concatenation broke when
        # --srcFileDir had no trailing slash).
        child = os.path.join(args.srcFileDir, img_name)
        print('({}):{}'.format(idx, child))
        if not jpegHandler.is_img(child):
            print('[info] file : ' + child + ' is not image !')
            continue

        # Read the jpeg, resize it to the required w/h, convert to YUV.
        input_image = jpegHandler.jpeg2yuv(child, dvppInWidth, dvppInHeight)

        # YUV 4:2:0 holds 3/2 bytes per pixel; use integer division so the
        # buffer size stays an int (``/`` yields a float on Python 3).
        inputImageTensor = hiai.NNTensor(input_image, dvppInWidth, dvppInHeight, 3, 'testImage', DataType.UINT8_T,
                                         dvppInWidth * dvppInHeight * 3 // 2)
        nntensorList = hiai.NNTensorList([inputImageTensor])
        if not nntensorList:
            print("nntensorList is null")
            break

        resultList = GraphInference(myGraph, nntensorList)
        if resultList is None:
            print("graph inference failed")
            continue
        SSD_PostProcess_with_plt(resultList, args.dstFileDir, child, coco_index_to_class)

    end = time.time()
    print('cost time ' + str((end - start) * 1000) + 'ms')

    # Tear down the default graph explicitly to release NPU resources.
    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print('------------------->End<-------------------')


if __name__ == "__main__":
    args = get_args()
    # os.makedirs also creates missing parent directories: plain os.mkdir
    # would raise FileNotFoundError for the default './Result/vgg_ssd'
    # whenever './Result' does not exist yet.
    if not os.path.exists(args.dstFileDir):
        os.makedirs(args.dstFileDir)
    if not os.path.exists(args.save_folder):
        os.makedirs(args.save_folder)
    main()

