"""
baseline for 1st Anti-UAV
https://anti-uav.github.io/
Qiang Wang
2020.02.16
"""
from __future__ import absolute_import
import os
import glob
import json
import cv2
import numpy as np

from siamfc import TrackerSiamFC
# Path to the Faster R-CNN detection results file; each line appears to be
# "image_name score xmin ymin xmax ymax" (see read() and main1()).
faster_rcnn_det_path='/home/yuwentao/project/faster-rcnn.pytorch/data/VOCdevkit2007/results/VOC2007/Main/epoch8_comp4_det_test_uav.txt'
# Root directory of the Anti-UAV test-dev dataset.
test_path='/home/yuwentao/data/dataset/test-dev'
det=[]# parsed detection rows: item 0 = image_name, item 1 = score, items 2-5 = bbox
image_folder_set=[]# ordered list of video/image-folder names seen in the detections
det_dict={}# video name -> list of {'image_name', 'score', 'bbox'} dicts (filled by read())
threshold=0.9 # score threshold: detections scoring <= this are treated as "not exist"
def read(det_path):
    """Parse a Faster R-CNN detection file into the module-level structures.

    Appends raw rows to ``det``, records each video/folder name in
    ``image_folder_set`` and groups parsed detections per video into
    ``det_dict``.  Only the FIRST line seen for every image name is kept
    (presumably the detector writes its best detection first — TODO confirm
    the file is ordered that way).

    Args:
        det_path (str): path to the detection results text file; each line is
            "image_name score x y x y ...", space separated.
    """
    seen_names = set()  # set gives O(1) dedup instead of scanning a list per line
    # 'with' guarantees the file handle is closed (the original leaked it)
    with open(det_path, 'r') as f:
        for line in f:
            fields = line.strip().split(" ")
            name = fields[0]
            # keep only the first detection per image name
            if name not in seen_names:
                seen_names.add(name)
                det.append(fields)
    # group detections by the video/folder name embedded in the image name,
    # e.g. "...+<video_name>+<frame>" -> "<video_name>"
    for row in det:
        image_folder = row[0].split('+')[-2]
        entry = {'image_name': row[0],
                 'score': float(row[1]),
                 'bbox': list(map(float, row[2:]))}
        if image_folder not in image_folder_set:
            image_folder_set.append(image_folder)
            det_dict[image_folder] = []
        det_dict[image_folder].append(entry)

def iou(bbox1, bbox2):
    """Compute the intersection-over-union of two bounding boxes.

    Args:
        bbox1 (sequence of float-convertible): box in (x, y, w, h) format.
        bbox2 (sequence of float-convertible): box in (x, y, w, h) format.

    Returns:
        float: IoU of the two boxes, in (0, 1]; the int ``0`` when the
        boxes do not overlap at all.
    """
    ax0, ay0, aw, ah = (float(v) for v in bbox1)
    bx0, by0, bw, bh = (float(v) for v in bbox2)

    # Corner coordinates of each box.
    ax1, ay1 = ax0 + aw, ay0 + ah
    bx1, by1 = bx0 + bw, by0 + bh

    # Extent of the intersection rectangle along each axis.
    inter_w = min(ax1, bx1) - max(ax0, bx0)
    inter_h = min(ay1, by1) - max(ay0, by0)

    # Non-positive extent on either axis means no overlap.
    if inter_w <= 0 or inter_h <= 0:
        return 0

    # Areas computed from corners, overlap over union.
    area_a = (ax1 - ax0) * (ay1 - ay0)
    area_b = (bx1 - bx0) * (by1 - by0)
    intersection = inter_w * inter_h
    union = area_a + area_b - intersection

    return intersection / union


def not_exist(pred):
    """Return True if the prediction *pred* encodes "target absent".

    Absence is reported as the single-element box ``[0]`` (or an empty
    prediction).  The original code compared the *list* itself to 0
    (``pred == 0``), which is always False for a list, so an absence
    prediction was never credited; compare the element instead.

    Args:
        pred (list): predicted bbox ``[x, y, w, h]``, or ``[0]`` / ``[]``
            to signal that the target is absent.

    Returns:
        bool: True when the prediction marks the target as absent.
    """
    return (len(pred) == 1 and pred[0] == 0) or len(pred) == 0


def eval(out_res, label_res):
    """Mean per-frame measure for tracker output against the labels.

    For frames where the target exists, the measure is the IoU between the
    predicted and ground-truth boxes; otherwise it is ``not_exist(pred)``
    (credit for correctly reporting absence).

    NOTE: this intentionally keeps the project's name, which shadows the
    built-in ``eval`` inside this module.
    """
    per_frame = [
        iou(pred, gt) if exist else not_exist(pred)
        for pred, gt, exist in zip(out_res, label_res['gt_rect'], label_res['exist'])
    ]
    return np.mean(per_frame)
def eval1(out_res, label_res):
    """Mean per-frame measure for detector output against the labels.

    A frame is scored with ``not_exist`` when its detection score is at or
    below the module-level ``threshold``, or when the ground-truth box is
    empty; otherwise it is scored with the IoU against the ground truth.
    """
    per_frame = []
    for bbox, score, gt in zip(out_res['bbox'], out_res['score'], label_res['gt_rect']):
        # Skip the IoU when the detection is weak or the target is absent.
        if score <= threshold or gt == []:
            per_frame.append(not_exist(bbox))
        else:
            per_frame.append(iou(bbox, gt))
    return np.mean(per_frame)

def main(mode='IR', visulization=False):
    """Run the SiamFC tracker on every test-dev video and report the measure.

    For each video: initialize the tracker from the first ground-truth box,
    track frame by frame, dump the per-frame boxes to a result file, and
    print the per-video and overall mixed measure.

    Args:
        mode (str): 'IR' or 'RGB' — which video/label pair to evaluate.
        visulization (bool): show each frame with GT and tracker boxes drawn.
    """
    assert mode in ['IR', 'RGB'], 'Only Support IR or RGB to evalute'
    # setup tracker
    net_path = 'model.pth'
    tracker = TrackerSiamFC(net_path=net_path)

    # setup experiments: one subfolder per video under dataset/test-dev
    video_paths = glob.glob(os.path.join('dataset', 'test-dev', '*'))
    video_num = len(video_paths)
    output_dir = os.path.join('results', tracker.name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    overall_performance = []

    # run tracking experiments and report performance
    for video_id, video_path in enumerate(video_paths, start=1):
        video_name = os.path.basename(video_path)
        video_file = os.path.join(video_path, '%s.mp4'%mode)
        res_file = os.path.join(video_path, '%s_label.json'%mode)
        with open(res_file, 'r') as f:
            label_res = json.load(f)

        # First-frame ground-truth box initializes the tracker.
        init_rect = label_res['gt_rect'][0]
        capture = cv2.VideoCapture(video_file)

        frame_id = 0
        out_res = []
        while True:
            ret, frame = capture.read()
            if not ret:
                # End of video: release the capture and move on.
                capture.release()
                break
            if frame_id == 0:
                tracker.init(frame, init_rect)  # initialization
                out = init_rect
                out_res.append(init_rect)
            else:
                out = tracker.update(frame)  # tracking
                out_res.append(out.tolist())
            if visulization:
                _gt = label_res['gt_rect'][frame_id]
                _exist = label_res['exist'][frame_id]
                if _exist:
                    # Green box: ground truth.
                    cv2.rectangle(frame, (int(_gt[0]), int(_gt[1])), (int(_gt[0] + _gt[2]), int(_gt[1] + _gt[3])),
                                  (0, 255, 0))
                cv2.putText(frame, 'exist' if _exist else 'not exist',
                            (frame.shape[1] // 2 - 20, 30), 1, 2, (0, 255, 0) if _exist else (0, 0, 255), 2)

                # Yellow box: tracker output.
                cv2.rectangle(frame, (int(out[0]), int(out[1])), (int(out[0] + out[2]), int(out[1] + out[3])),
                              (0, 255, 255))
                cv2.imshow(video_name, frame)
                cv2.waitKey(1)
            frame_id += 1
        if visulization:
            cv2.destroyAllWindows()
        # save result as JSON: {"res": [[x, y, w, h], ...]}
        output_file = os.path.join(output_dir, '%s_%s.txt' % (video_name, mode))
        with open(output_file, 'w') as f:
            json.dump({'res': out_res}, f)

        # NOTE: this calls the module-level eval() above, not the builtin.
        mixed_measure = eval(out_res, label_res)
        overall_performance.append(mixed_measure)
        print('[%03d/%03d] %20s %5s Fixed Measure: %.03f' % (video_id, video_num, video_name, mode, mixed_measure))

    print('[Overall] %5s Mixed Measure: %.03f\n' % (mode, np.mean(overall_performance)))

def main1(mode='IR', visulization=False):
    """Evaluate pre-computed Faster R-CNN detections against Anti-UAV labels.

    Requires ``read()`` to have been called first so that ``det_dict`` and
    ``image_folder_set`` are populated.  For each video, scores every frame
    (IoU when the detection is confident and ground truth exists, otherwise
    ``not_exist``) and prints per-video and overall means.

    Args:
        mode (str): 'IR' or 'RGB' — which video/label pair to evaluate.
        visulization (bool): draw ground-truth and detection boxes per frame.
    """
    assert mode in ['IR', 'RGB'], 'Only Support IR or RGB to evalute'
    # setup tracker — only used here for its name in the results path
    net_path = 'model.pth'
    tracker = TrackerSiamFC(net_path=net_path)

    output_dir = os.path.join('results', tracker.name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    overall_performance = []

    for video_id, video_name in enumerate(image_folder_set):
        video_path_name = os.path.join(test_path, video_name)  # per-video folder
        video_file = os.path.join(video_path_name, '%s.mp4' % mode)
        res_file = os.path.join(video_path_name, '%s_label.json' % mode)
        with open(res_file, 'r') as f:
            label_res = json.load(f)  # ground-truth labels for this video
        capture = cv2.VideoCapture(video_file)
        frame_id = 0
        out_res = det_dict[video_name]  # detections for this video
        measure_per_frame = []
        while True:
            ret, frame = capture.read()
            if not ret:
                capture.release()
                break

            _gt = label_res['gt_rect'][frame_id]
            _det = out_res[frame_id]
            _score = _det['score']
            # Detections are stored as [xmin, ymin, xmax, ymax].  Build a NEW
            # [x, y, w, h] list rather than mutating _det['bbox'] in place:
            # the original "-=" corrupted det_dict for any later pass and,
            # since _pred_bbox aliased _det['bbox'], made the visualization
            # rectangle below use (w, h) as its second corner.
            x0, y0, x1, y1 = _det['bbox']
            _pred_bbox = [x0, y0, x1 - x0, y1 - y0]
            # A weak detection (score <= threshold) or an absent ground truth
            # is scored as an "absent" prediction instead of an IoU.
            measure_per_frame.append(
                not_exist(_pred_bbox) if (_score <= threshold or _gt == [])
                else iou(_pred_bbox, _gt))

            if visulization:
                if _gt != []:
                    # Green box: ground truth (x, y, w, h).
                    cv2.rectangle(frame, (int(_gt[0]), int(_gt[1])),
                                  (int(_gt[0] + _gt[2]), int(_gt[1] + _gt[3])),
                                  (0, 255, 0))
                if _score > threshold:  # score above threshold means "exist"
                    cv2.putText(frame, 'exist',
                                (frame.shape[1] // 2 - 20, 30), 1, 2,
                                (0, 255, 0), 2)
                # Yellow box: detection, drawn from its corner coordinates.
                cv2.rectangle(frame, (int(x0), int(y0)), (int(x1), int(y1)),
                              (0, 255, 255))
                cv2.waitKey(1)
            frame_id += 1
        mixed_measure = np.mean(measure_per_frame)
        overall_performance.append(mixed_measure)
        print(video_id, video_name, mixed_measure)

    print('[Overall] %5s Mixed Measure: %.03f\n' % (mode, np.mean(overall_performance)))












if __name__ == '__main__':

    # Parse the detection file first so det_dict / image_folder_set are
    # populated, then evaluate the detections.  Call main() instead to run
    # the SiamFC tracker end to end.
    read(faster_rcnn_det_path)

    main1(mode='IR', visulization=True)
