#----------------------------------------------------#
#   Single-image prediction, directory (batch) prediction,
#   camera/video detection and FPS testing are combined in
#   this one .py file; select the behavior via the task mode.
#----------------------------------------------------#
import time
import argparse
import cv2
import numpy as np
from PIL import Image

from models.detectors.DETECTOR import DETECTOR

from settings import set_dataset, set_computing_env

def _str2bool(value):
    """Parse a command-line boolean.

    argparse's ``type=bool`` is a trap: ``bool("False")`` is ``True`` because
    any non-empty string is truthy, so ``--resume False`` used to enable resume.
    This helper accepts the usual spellings and rejects everything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def set_inference_configuration(argv=None):
    """Build and parse the inference command-line configuration.

    Args:
        argv: optional list of argument strings; ``None`` (the default,
            backward compatible) parses ``sys.argv`` as before.

    Returns:
        argparse.Namespace with all dataset/detector/task options.
    """
    parser = argparse.ArgumentParser()
    # dataset configuration
    parser.add_argument('--dataset', default='NWPUv2', type=str, help='Dataset:NWPUv1, NWPUv2, DIOR')
    parser.add_argument('--dataset_path', type=str, default='dataset/NWPUv2')

    # detector configuration
    parser.add_argument('--detector', type=str, default='FasterRCNN', help='Detector:FasterRCNN, MetaRCNN')
    parser.add_argument('--model_path', type=str, default='exp_test/frcn_bs16_100e_gpu0/best_epoch_weights.pth', help='Detector weight path')
    parser.add_argument('--mode', type=str, default='inference', help='mode: train, inference')
    # NOTE: was type=bool (always True for any non-empty string); now parsed properly.
    parser.add_argument('--resume', type=_str2bool, default=False)
    parser.add_argument('--ckpt_path', type=str, default=None)

    parser.add_argument('--backbone', type=str, default='resnet50', help='Backbone: resnet50, resnet101')
    # TODO support loading several different pretrained backbones; change the model URL
    parser.add_argument('--pretrained', type=_str2bool, default=False, help='use pretrained backbone')
    parser.add_argument('--neck', type=str, default='FPN', help='Neck: FPN')
    parser.add_argument('--rpn_head', type=str, default='RPN', help='Densehead: RPN')
    # NOTE: was type=list, which splits the CLI string into single characters;
    # nargs='+' parses e.g. "--anchor_scales 4 8 16" into [4, 8, 16].
    parser.add_argument('--anchor_scales', type=int, nargs='+', default=[4, 8, 16], help='anchor scales')
    parser.add_argument('--anchor_ratios', type=float, nargs='+', default=[0.5, 1, 2], help='anchor aspect ratios')
    parser.add_argument('--feat_stride', type=int, default=16)
    parser.add_argument('--roi_head', type=str, default='Resnet50RoIHead')

    # NOTE: these were declared type=int with float defaults, so any CLI
    # override either failed or truncated; they are thresholds in [0, 1].
    parser.add_argument('--nms_iou', type=float, default=0.3)
    parser.add_argument('--confidence', type=float, default=0.5)
    # attribute name kept as-is ("threhold" typo) for backward compatibility with callers
    parser.add_argument('--score_threhold', type=float, default=0.5)
    # computing env
    parser.add_argument('--seed', type=int, default=2023)
    parser.add_argument('--gpu_id', type=int, nargs='+', default=[0], help='None: cpu')

    # inference task
    parser.add_argument('--task', type=str, default='dir_predict', help='Task:img_predict, dir_predict,video_predict, onnx_convert')

    # img_predict task
    parser.add_argument('--crop', type=_str2bool, default=True)
    # NOTE: was type=str with a bool default; parsed as a real boolean now.
    parser.add_argument('--count', type=_str2bool, default=True)

    # dir_predict task
    parser.add_argument('--dir_origin_path', type=str, default='exps/map_out/images-test')
    parser.add_argument('--dir_save_path', type=str, default='exps/infer_results')

    # video_predict task
    parser.add_argument('--video_path', type=int, default=0, help='0:camera, others:videos')
    parser.add_argument('--video_save_path', type=str, default='exps/video_results')
    parser.add_argument('--video_fps', type=int, default=30)

    # onnx
    parser.add_argument('--onnx_save_path', type=str, default=None)

    return parser.parse_args(argv)

if __name__ == "__main__":
    print("\n\033[1;36;40mStart Inference....................\033[0m")

    args = set_inference_configuration()

    # Resolve computing device and dataset metadata before building the
    # detector, since DETECTOR reads both from `args`.
    args.device, args.cuda = set_computing_env(args)

    args.input_shape, args.class_names, args.num_classes,  num_train, num_val, num_test, train_lines, val_lines, test_lines = set_dataset(args)

    detector = DETECTOR(args)

    mode = args.task

    if mode == "img_predict":
        # Interactive loop: prompt for an image path, detect, show and save.
        while True:
            img = input('Input image filename:')
            try:
                image = Image.open(img)
            except Exception as e:
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt); report what actually went wrong.
                print('Open Error! Try again!', e)
                continue
            else:
                r_image = detector.detect_image(image, crop = args.crop, count = args.count)
                r_image.show()
                r_image.save('result.jpg')

    elif mode == "video_predict":
        capture = cv2.VideoCapture(args.video_path)
        # `out` stays None when no save path is given; the original code
        # unconditionally called out.release() and raised NameError then.
        out = None
        if args.video_save_path != "":
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            out = cv2.VideoWriter(args.video_save_path, fourcc, args.video_fps, size)

        fps = 0.0
        while True:
            t1 = time.time()
            # Grab one frame; `ref` is False at end-of-stream / camera failure.
            ref, frame = capture.read()
            if not ref:
                # The original ignored the status flag and crashed inside
                # cvtColor on a None frame when the stream ended.
                break
            # BGR (OpenCV) -> RGB (PIL) for the detector.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # Convert to a PIL Image.
            frame = Image.fromarray(np.uint8(frame))
            # Run detection; the detector returns an annotated PIL image.
            frame = np.array(detector.detect_image(frame))
            # RGB -> BGR so OpenCV can display/save it.
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

            # Running average of the instantaneous FPS.
            fps  = ( fps + (1./(time.time()-t1)) ) / 2
            print("fps= %.2f"%(fps))
            frame = cv2.putText(frame, "fps= %.2f"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            cv2.imshow("video", frame)
            c = cv2.waitKey(1) & 0xff
            if out is not None:
                out.write(frame)

            if c == 27:  # ESC quits the loop
                break
        capture.release()
        if out is not None:
            out.release()
        cv2.destroyAllWindows()

    elif mode == "dir_predict":
        import os
        from tqdm import tqdm
        # Image IDs (one per line, no extension) from the dataset split file.
        img_names = open(os.path.join(args.dataset_path, "ImageSets/Main/test.txt")).read().strip().split()
        # Create the output directory once, instead of re-checking every iteration.
        os.makedirs(args.dir_save_path, exist_ok=True)
        for img_name in tqdm(img_names):
            img_name = img_name + '.jpg'
            if img_name.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')):
                image_path  = os.path.join(args.dir_origin_path, img_name)
                image       = Image.open(image_path)
                r_image     = detector.detect_image(image)
                r_image.save(os.path.join(args.dir_save_path, img_name.replace(".jpg", ".png")), quality=95, subsampling=0)
    # TODO
    # elif mode == "onnx":
    #     Simplify = True
    #     detector.convert_to_onnx(Simplify, args.onnx_save_path)

    else:
        raise AssertionError("Please specify the correct mode.")
