#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   predict.py
@Time    :   2021/11/07 12:53:40
@Author  :   Yaadon 
'''

# here put the import lib
import json
import os
from utils.nms_utils import multiclass_nms
import paddle
from net.yolov3 import YOLOv3
from data.loader import test_data_loader, single_image_data_loader
from toolkit.drawer import draw_results
import argparse
from train import ANCHOR_MASKS, ANCHORS, NUM_CLASSES

# Post-processing hyper-parameters shared by both prediction modes
# (fed to model.get_pred and multiclass_nms below).
VALID_THRESH = 0.01  # score threshold: boxes below this are discarded before NMS
NMS_TOPK = 400       # max candidate boxes fed into NMS per image (pre_nms_topk)
NMS_POSK = 100       # max boxes kept after NMS per image (pos_nms_topk)
NMS_THRESH = 0.45    # IoU threshold used by NMS to suppress overlapping boxes

def create_parser():
    """Parse the command-line options of the prediction script.

    Returns:
        argparse.Namespace with two boolean flags:
        ``all``    -- predict every image in the test directory;
        ``single`` -- predict one hard-coded sample image.
    """
    arg_parser = argparse.ArgumentParser(description='Insect recognition program.')
    arg_parser.add_argument('-a', '--all', action="store_true",
                            help='Predict all images.')
    arg_parser.add_argument('-s', '--single', action="store_true",
                            help='Predict a single image.')
    return arg_parser.parse_args()

if __name__ == '__main__':
    args = create_parser()
    params_file_path = './yolo_epoch35'

    def _load_model(weights_path):
        """Build a YOLOv3 model, load trained weights, switch to eval mode."""
        model = YOLOv3(num_classes=NUM_CLASSES)
        model.load_dict(paddle.load(weights_path))
        model.eval()
        return model

    def _predict_batch(model, img_data, img_scale_data):
        """Run one forward pass + NMS on a batch.

        Returns the per-image result list produced by multiclass_nms
        (one entry of detected boxes per image in the batch).
        """
        img = paddle.to_tensor(img_data)
        img_scale = paddle.to_tensor(img_scale_data)
        # Calling the layer invokes forward(); preferred over model.forward().
        outputs = model(img)
        bboxes, scores = model.get_pred(outputs,
                                        im_shape=img_scale,
                                        anchors=ANCHORS,
                                        anchor_masks=ANCHOR_MASKS,
                                        valid_thresh=VALID_THRESH)
        return multiclass_nms(bboxes.numpy(), scores.numpy(),
                              score_thresh=VALID_THRESH,
                              nms_thresh=NMS_THRESH,
                              pre_nms_topk=NMS_TOPK,
                              pos_nms_topk=NMS_POSK)

    if args.all:
        # Predict every image in the test set and dump results to JSON.
        TESTDIR = './insects/test/images'

        model = _load_model(params_file_path)

        total_results = []
        test_loader = test_data_loader(TESTDIR, batch_size=1, mode='test')
        for i, data in enumerate(test_loader()):
            img_name, img_data, img_scale_data = data
            result = _predict_batch(model, img_data, img_scale_data)
            for j in range(len(result)):
                # Pair each image name with its (serializable) detections.
                total_results.append([img_name[j], result[j].tolist()])
            print('processed {} pictures'.format(len(total_results)))

        print('')
        # Context manager guarantees the output file is closed/flushed
        # (the original open() handle was never closed).
        with open('pred_results.json', 'w') as f:
            json.dump(total_results, f)

    elif args.single:
        # Predict one sample image and draw the detections on it.
        image_name = './insects/test/images/2495.jpeg'

        model = _load_model(params_file_path)

        result = None
        test_loader = single_image_data_loader(image_name, mode='test')
        for data in test_loader():
            img_name, img_data, img_scale_data = data
            results = _predict_batch(model, img_data, img_scale_data)
            result = results[0]

        # Guard against an empty loader: the original code would raise
        # NameError on `results` if no batch was yielded.
        if result is not None:
            draw_results(result, image_name, draw_thresh=0.5)
