# -*- coding=utf-8 -*-
import hiai
from hiai.nn_tensor_lib import DataType
from utils.utils import CreateGraphWithoutDVPP, GraphInference
from utils import jpegHandler
from utils.postprocess import SSD_PostProcess_with_plt
from utils.utils import coco_index_to_class
from voc_eval.voc0712 import VOCDetection, AnnotationTransform
import os
import sys
import time
import argparse



def get_args():
    """Parse command-line options for the offline-model test script.

    Returns:
        argparse.Namespace with all options below.

    Exits:
        sys.exit(-1) when the offline model file (--omFileName) does not
        exist on disk.
    """
    def _str2bool(v):
        # argparse's type=bool treats ANY non-empty string (even 'False')
        # as True; parse common textual booleans explicitly instead.
        return str(v).lower() in ('1', 'true', 'yes', 'y', 't')

    parser = argparse.ArgumentParser(description='Faster R-CNN test script')
    parser.add_argument('--kCategoryIndex',
                        default=2,
                        help='number to correct class idx', type=int)
    parser.add_argument('--dvppInWidth',
                        default=512,
                        help='input image width, 128*int ', type=int)
    parser.add_argument('--dvppInHeight',
                        default=512,
                        help='input image height, 16*int', type=int)
    parser.add_argument('--omFileName',
                        default='./models/vgg_ssd.om',
                        help='offline model path')
    # BUG FIX: was '--args.srcFileDir', which argparse stored under the
    # literal attribute name 'args.srcFileDir' (unreachable as an attribute).
    parser.add_argument('--srcFileDir',
                        default='./ImageNetRaw/',
                        help='img to detect')
    # BUG FIX: main() reads args.VOCroot, but no such option existed,
    # which raised AttributeError at runtime.
    parser.add_argument('--VOCroot', default='./VOCdevkit/', type=str,
                        help='VOC dataset root directory')
    parser.add_argument('-d', '--dataset', default='VOC',
                        help='VOC or COCO version')
    parser.add_argument('--save_folder', default='eval/', type=str,
                        help='Dir to save results')
    parser.add_argument('--dstFileDir', default='./Result/faster_rcnn', type=str,
                        help='Dir to save image results')
    parser.add_argument('--retest', default=True, type=_str2bool,
                        help='test cache results')
    args = parser.parse_args()

    if not os.path.isfile(args.omFileName):
        print("{} does not exist..., Exit!!!".format(args.omFileName))
        sys.exit(-1)

    return args



def main():
    """Run offline-model inference over the VOC2007 test split.

    Reads options from the module-level ``args`` (set in ``__main__``),
    builds a HiAI graph for the offline model, feeds every test image
    through it, post-processes/saves detections, and prints timing stats.
    """
    # capture_flag = 1  # use camera or not
    # Model descriptor: (name, path-to-.om offline model).
    inferenceModel = hiai.AIModelDescription('faster-rcnn', args.omFileName)
    print 'offline model path ----> {}'.format(args.omFileName)
    # print inferenceModel
    # we will resize the jpeg to 896*608 to meet faster-rcnn requirement via opencv,
    # so DVPP resizing is not needed
    # NOTE(review): the comment above says 896*608, but the defaults in
    # get_args are 512x512 (vgg_ssd) — confirm which model this targets.
    myGraph = CreateGraphWithoutDVPP(inferenceModel)
    if myGraph is None:
        print "CreateGraph failed"
        return None

    # in this sample demo, the faster-rcnn  model requires 896*608 images
    dvppInWidth = args.dvppInWidth
    dvppInHeight = args.dvppInHeight

    start = time.time()
    index = 0
    inference_time = 0  # accumulated pure-inference seconds across all images
    # VOC2007 test split; annotations are transformed but no image transform
    # is applied here (resizing happens in jpeg2yuv below).
    testset = VOCDetection(args.VOCroot, [('2007', 'test')], None, AnnotationTransform())
    print '------------------->start<-------------------'
    for i in range(len(testset)):
        # child = os.path.join('%s%s' % (srcFileDir, allDir))
        # pull_image(i) appears to return the image file path here —
        # it is passed to is_img/jpeg2yuv below. TODO confirm.
        child = testset.pull_image(i)
        print '({}):{}'.format(i, child)
        if (not jpegHandler.is_img(child)):
            print '[info] file : ' + child + ' is not image !'
            continue

        # read the jpeg file and resize it to required w&h, than change it to YUV format.
        input_image = jpegHandler.jpeg2yuv(child, dvppInWidth, dvppInHeight)

        # Buffer size w*h*3/2 matches a YUV420 (e.g. NV12) layout:
        # full-res luma plane plus half-res chroma. Py2 int division.
        inputImageTensor = hiai.NNTensor(input_image, dvppInWidth, dvppInHeight, 3, 'testImage', DataType.UINT8_T,
                                         dvppInWidth * dvppInHeight * 3 / 2)
        datalist = [inputImageTensor]

        nntensorList = hiai.NNTensorList(datalist)
        if not nntensorList:
            print "nntensorList is null"
            break

        # Time only the graph inference call, excluding JPEG decode/resize.
        start_inference = time.time()
        resultList = GraphInference(myGraph, nntensorList)
        end_inference = time.time()
        inference_time += (end_inference - start_inference)

        if resultList is None:
            print "graph inference failed"
            continue
        # if you want save images, release here
        SSD_PostProcess_with_plt(resultList, args.dstFileDir, child, coco_index_to_class)

        # Progress report every 20 images with the last single-image latency.
        if i % 20 == 0:
            print('im_detect: {:d}/{:d}, inference_time: {:.3f}s' .format(i + 1, len(testset), (end_inference - start_inference)))

    end = time.time()


    # Tear down the implicitly-created default graph to free device resources.
    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print '------------------->End<-------------------'

    # Total wall-clock time including decode, post-processing and I/O.
    print 'cost time ' + str((end - start) * 1000) + 'ms'

if __name__ == "__main__":
    args = get_args()
    if not os.path.exists(args.dstFileDir):
        os.mkdir(args.dstFileDir)
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)
    main()


