# -*- coding=utf-8 -*-
import argparse
import os
import pickle
import sys
import time

import numpy as np

import hiai
from hiai.nn_tensor_lib import DataType

from utils.utils import CreateGraphWithoutDVPP, GraphInference
from utils import jpegHandler
from voc_eval.voc0712 import VOCDetection, AnnotationTransform



def get_args():
    """Parse command-line options for the offline-model VOC test script.

    Returns:
        argparse.Namespace with the parsed options.

    Exits with status -1 when the offline model file (--omFileName) does
    not exist on disk.
    """

    def _str2bool(value):
        # argparse's ``type=bool`` is a classic trap: bool("False") is True
        # because any non-empty string is truthy.  Parse common spellings.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', '1', 'yes', 'y'):
            return True
        if lowered in ('false', '0', 'no', 'n'):
            return False
        raise argparse.ArgumentTypeError(
            'boolean value expected, got {!r}'.format(value))

    parser = argparse.ArgumentParser(description='Faster R-CNN test script')
    parser.add_argument('--kCategoryIndex',
                        default=2,
                        help='number to correct class idx', type=int)
    parser.add_argument('--dvppInWidth',
                        default=512,
                        help='input image width, 128*int ', type=int)
    parser.add_argument('--dvppInHeight',
                        default=512,
                        help='input image height, 16*int', type=int)
    parser.add_argument('--omFileName',
                        default='./models/vgg_ssd.om',
                        help='offline model path')
    parser.add_argument('--srcFileDir',
                        default='./ImageNetRaw/',
                        help='img to detect')
    parser.add_argument('-d', '--dataset', default='VOC',
                        help='VOC or COCO version')
    parser.add_argument('--save_folder', default='eval/', type=str,
                        help='Dir to save results')
    parser.add_argument('--dstFileDir', default='./Result/vgg_ssd', type=str,
                        help='Dir to save image results')
    # Was type=bool, so "--retest False" silently evaluated to True.
    parser.add_argument('--retest', default=True, type=_str2bool,
                        help='test cache results')
    # Consumed by the __main__ block (VOCDetection) but was never
    # registered, which made ``args.VOCroot`` raise AttributeError.
    parser.add_argument('--VOCroot', default='./VOCdevkit/', type=str,
                        help='root directory of the VOC dataset')
    args = parser.parse_args()

    if not os.path.isfile(args.omFileName):
        # Single-argument print() is valid in both Python 2 and 3.
        print("{} does not exist..., Exit!!!".format(args.omFileName))
        sys.exit(-1)

    return args


def test_net(save_folder, net, testset, max_per_image=300, thresh=0.005):
    """Run offline-model inference on every image of *testset* and evaluate.

    Args:
        save_folder: directory where ``detections.pkl`` and the evaluation
            results are written (created if missing).
        net: the HiAI graph to run inference on.
        testset: VOC-style dataset providing ``pull_image(i)`` and
            ``evaluate_detections(all_boxes, save_folder)``.
        max_per_image: keep at most this many detections per image;
            values <= 0 disable the cap.
        thresh: unused; kept for call-site compatibility.

    NOTE(review): also reads the module-level ``args`` (dvppInWidth,
    dvppInHeight, kCategoryIndex, retest) — consider passing these in.
    """
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    num_images = len(testset)
    num_classes = 21  # 20 VOC classes + background
    # all_boxes[class][image] -> (N, 5) array of [x1, y1, x2, y2, score]
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(num_classes)]
    det_file = os.path.join(save_folder, 'detections.pkl')

    if args.retest:
        # Re-evaluate cached detections instead of re-running inference.
        # ``with`` fixes the original file-handle leak here.
        with open(det_file, 'rb') as f:
            all_boxes = pickle.load(f)
        print('Evaluating detections')
        testset.evaluate_detections(all_boxes, save_folder)
        return

    dvppInWidth = args.dvppInWidth
    dvppInHeight = args.dvppInHeight

    inference_time = 0
    print('------------------->start<-------------------')
    for i in range(num_images):
        child = testset.pull_image(i)
        input_image = jpegHandler.jpeg2yuv(child, dvppInWidth, dvppInHeight)
        # YUV420SP stores chroma below the luma plane, so the luma height
        # is total_rows / 1.5.
        img_rows, img_cols, img_channel = int(input_image.shape[0] / 1.5), input_image.shape[1], 3
        # NOTE(review): the 256x224 divisors suggest the network's input
        # resolution — confirm against the .om model.
        scale_width = img_cols / float(256)
        scale_height = img_rows / float(224)
        inputImageTensor = hiai.NNTensor(input_image, dvppInWidth, dvppInHeight, 3, 'testImage', DataType.UINT8_T,
                                         dvppInWidth * dvppInHeight * 3 / 2)

        nntensorList = hiai.NNTensorList([inputImageTensor])
        if not nntensorList:
            print("nntensorList is null")
            break

        start_inference = time.time()
        # Bug fix: use the graph passed in as ``net`` instead of the
        # module-level ``myGraph`` global the original referenced.
        resultList = GraphInference(net, nntensorList)
        end_inference = time.time()
        inference_time += (end_inference - start_inference)

        if resultList is None:
            print("graph inference failed")
            continue

        # resultList[0]: per-class detection counts; resultList[1]: boxes.
        tensor_num = np.reshape(resultList[0], 32)
        tensor_bbox = np.reshape(resultList[1], (64, 304, 8))

        for j in range(1, num_classes):  # class 0 is background
            num = int(tensor_num[j])
            if num == 0:
                all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
                continue
            c_dets = np.zeros((num, 5))
            class_idx = j * args.kCategoryIndex  # loop-invariant, hoisted
            for bbox_idx in range(num):
                # [lt_x, lt_y, rb_x, rb_y, score] rescaled to image size.
                c_dets[bbox_idx][0] = scale_width * tensor_bbox[class_idx][bbox_idx][0]
                c_dets[bbox_idx][1] = scale_height * tensor_bbox[class_idx][bbox_idx][1]
                c_dets[bbox_idx][2] = scale_width * tensor_bbox[class_idx][bbox_idx][2]
                c_dets[bbox_idx][3] = scale_height * tensor_bbox[class_idx][bbox_idx][3]
                c_dets[bbox_idx][4] = tensor_bbox[class_idx][bbox_idx][4]
            all_boxes[j][i] = c_dets

        if max_per_image > 0:
            # Keep only the max_per_image highest-scoring boxes per image.
            image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]

        if i % 20 == 0:
            print('im_detect: {:d}/{:d}, {:.3f}s'.format(i + 1, num_images, (end_inference - start_inference)))

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    testset.evaluate_detections(all_boxes, save_folder)

    print('inference cost time ' + str(inference_time * 1000) + 'ms')

    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print('------------------->End<-------------------')


if __name__ == "__main__":
    args = get_args()
    if not os.path.exists(args.dstFileDir):
        os.mkdir(args.dstFileDir)
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)
    inferenceModel = hiai.AIModelDescription('vgg-ssd', args.omFileName)
    print(args.omFileName)
    # print(inferenceModel)
    # we will resize the jpeg to 896*608 to meet faster-rcnn requirement via opencv,
    # so DVPP resizing is not needed
    myGraph = CreateGraphWithoutDVPP(inferenceModel)
    if myGraph is None:
        print("CreateGraph failed")
        exit(0)
    args = get_args()
    # load data
    testset = VOCDetection(args.VOCroot, [('2007', 'test')], None, AnnotationTransform())
    # top_k = (300, 200)[args.dataset == 'COCO']
    top_k = 200
    save_folder = os.path.join(args.save_folder, args.dataset)
    test_net(save_folder, myGraph, testset, top_k, thresh=0.01)

