# -*- coding:utf-8 -*-
import os, sys
import os.path as osp
import fnmatch
import shutil
from tqdm import tqdm
import os, glob, json, copy
from scipy.io import loadmat
from collections import defaultdict
from multiprocessing import Pool
import numpy as np
import cv2
from lxml import etree, objectify
import xml.etree.ElementTree as ET

from tool import filesystem, via_tool, opencv_tool



def instance2xml_base(anno, bbox_type='xyxy'):
    """Build a VOC-style lxml annotation tree for one image.

    Args:
        anno: dict with keys 'id' (filename), 'label', 'bbox' (list of
            4-number boxes) and 'occlusion' (per-box occlusion values).
        bbox_type: 'xyxy' or 'xywh'.
            NOTE(review): the branches are inverted relative to the names —
            'xyxy' decodes the box as (xmin, ymin, width, height) while
            'xywh' decodes it as (xmin, ymin, xmax, ymax).  Preserved as-is
            because the existing caller (parse_anno_file) depends on the
            current behavior; confirm before "fixing" the naming.

    Returns:
        lxml objectify element rooted at <annotation>, with a fixed
        640x480x3 <size> (Caltech pedestrian frames).
    """
    assert bbox_type in ['xyxy', 'xywh']
    E = objectify.ElementMaker(annotate=False)
    anno_tree = E.annotation(
        E.folder('VOC2014_instance/person'),
        E.filename(anno['id']),
        E.source(
            E.database('Caltech pedestrian'),
            E.annotation('Caltech pedestrian'),
            E.image('Caltech pedestrian'),
            E.url('None')
        ),
        E.size(
            E.width(640),
            E.height(480),
            E.depth(3)
        ),
        E.segmented(0),
    )
    for index, bbox in enumerate(anno['bbox']):
        bbox = [float(x) for x in bbox]
        # See the NOTE in the docstring: branch names and decoding are swapped.
        if bbox_type == 'xyxy':
            xmin, ymin, w, h = bbox
            xmax = xmin + w
            ymax = ymin + h
        else:
            xmin, ymin, xmax, ymax = bbox
        # Reuse the ElementMaker built above instead of re-instantiating one
        # per box (the original recreated it on every iteration).
        anno_tree.append(
            E.object(
                E.name(anno['label']),
                E.bndbox(
                    E.xmin(int(xmin)),
                    E.ymin(int(ymin)),
                    E.xmax(int(xmax)),
                    E.ymax(int(ymax))
                ),
                E.difficult(0),
                E.occlusion(anno["occlusion"][index])
            )
        )
    return anno_tree


def parse_anno_file(vbb_inputdir, vbb_outputdir):
    """Convert every .vbb annotation under vbb_inputdir into per-frame
    VOC-style XML files written into vbb_outputdir.

    Each sub-directory of vbb_inputdir corresponds to one sequence set
    (set00, set01, ...); each .vbb file inside it holds the annotations
    for every frame of one video.
    """
    assert os.path.exists(vbb_inputdir)
    for cam_id in os.listdir(vbb_inputdir):
        print("Parsing annotations of camera: ", cam_id)
        for vbb_file in glob.glob(os.path.join(vbb_inputdir, cam_id, "*.vbb")):
            # One dict per annotated frame of this video.
            annos = vbb_anno2dict(vbb_file, cam_id)
            if not annos:
                continue

            # Create the output directory lazily, on first real result.
            if not os.path.exists(vbb_outputdir):
                os.makedirs(vbb_outputdir)

            # Deterministic output order: sort frames by filename.
            for filename, anno in sorted(annos.items(), key=lambda item: item[0]):
                if "bbox" not in anno:
                    continue
                anno_tree = instance2xml_base(anno)
                outfile = os.path.join(vbb_outputdir, os.path.splitext(filename)[0] + ".xml")
                print("Generating annotation xml file of picture: ", filename)
                # One XML file per annotated frame.
                etree.ElementTree(anno_tree).write(outfile, pretty_print=True)



def store_to_struct(xml_path, disable_label):
    """Parse a CVAT video-annotation XML into per-track frame arrays.

    Args:
        xml_path: path to a CVAT XML dump (one <track> per object, one
            <box> per annotated frame; total frame count under <meta><frames>).
        disable_label: collection of track labels to skip entirely.

    Returns:
        A list of "lines".  Each line has one slot per frame, holding either
        None or the box's attribute dict augmented with 'label', 'id' and an
        integer 'xywh' list.  A track whose frames are not contiguous is
        split into one line per contiguous segment, e.g.:
            <box frame="370" .../>
            <box frame="475" .../>   <- gap: a new line starts here

    Fixes vs the original:
        * float() instead of eval() on attribute text (same numeric result,
          no arbitrary code execution on untrusted XML).
        * ET.parse(path) instead of parsing an open() handle that was never
          closed.
        * Segment flush uses >= 0 so a segment ending at frame 0 is no
          longer silently dropped at a gap.
        * Tracks without any <box> no longer append a bare None line.
    """
    tree = ET.parse(xml_path)
    root = tree.getroot()

    frames_cnt = int(root.find('meta').find("frames").text)
    empty_line = [None] * frames_cnt

    data_struct = []
    for track in root.findall('track'):
        label = track.attrib["label"]
        id_ = track.attrib["id"]
        if label in disable_label:
            continue

        new_line = None
        prev_frame_cnt = -100  # sentinel: no previous frame seen yet
        for box in track.findall('box'):
            attr_dict = box.attrib
            frame_cnt = int(attr_dict["frame"])
            attr_dict["label"] = label
            attr_dict["id"] = id_

            x = int(float(attr_dict["xtl"]))
            y = int(float(attr_dict["ytl"]))
            w = int(float(attr_dict["xbr"])) - x
            h = int(float(attr_dict["ybr"])) - y
            attr_dict["xywh"] = [x, y, w, h]

            if frame_cnt - prev_frame_cnt != 1:
                # Gap in the sequence: flush the finished segment and start
                # a fresh one.
                if prev_frame_cnt >= 0:
                    data_struct.append(new_line)
                new_line = list(empty_line)

            new_line[frame_cnt] = attr_dict
            prev_frame_cnt = frame_cnt

        if new_line is not None:
            data_struct.append(new_line)

    return data_struct


def save_to_via(data_struct, img_dir, img_prefix_path, via_name="via_region_data.json"):
    """Export the (filtered) track structure to a VIA region-data JSON.

    Args:
        data_struct: output of store_to_struct; every line has one slot
            per frame (all lines share the same length).
        img_dir: directory the JSON file is written into.
        img_prefix_path: format string producing the image path for a
            frame index, e.g. ".../stem.mp4_{}.jpg".
        via_name: output file name.
    """
    via_dict = {}
    frame_cnt = len(data_struct[0]) if data_struct else 0

    for frm_idx in range(frame_cnt):
        boxs = []
        for line in data_struct:
            attr_dict = line[frm_idx]
            if attr_dict is None:
                continue
            # Copy the xywh list so the metadata appended below does not
            # leak back into data_struct (the original mutated it in place,
            # corrupting the stored box on repeated calls).
            box = list(attr_dict["xywh"])
            box.append([["label", attr_dict["label"]],
                        ["occluded", attr_dict["occluded"]],
                        ["cut", attr_dict["cut"]],
                        ["id", attr_dict["id"]]])
            boxs.append(box)

        img_path = img_prefix_path.format(frm_idx)
        # Skip frames whose extracted image is missing on disk.
        if not osp.exists(img_path):
            continue
        name, item_dict = via_tool.gen_via_item_dict(img_path, "rect", boxs)
        via_dict[name] = item_dict

    with open(osp.join(img_dir, via_name), "w") as wf:
        wf.write(json.dumps(via_dict))

def filter_box_imp(data_struct, car_idx, frm_idx, filter_labels):
    """Decide whether the box at (car_idx, frm_idx) is shadowed by a larger
    overlapping box of a related label on the same frame.

    Args:
        data_struct: per-track frame arrays (see store_to_struct).
        car_idx, frm_idx: position of the candidate box; must be non-None.
        filter_labels: labels that may suppress the candidate.

    Returns:
        0 if another box with a label in filter_labels overlaps the
        candidate (opencv_tool.iou_3 ratio > 0.4) and has a larger area;
        otherwise None (keep the box).
    """
    box = data_struct[car_idx][frm_idx]["xywh"]

    for other_idx, line in enumerate(data_struct):
        if other_idx == car_idx:
            continue
        other = line[frm_idx]
        if other is None:
            continue
        if other["label"] not in filter_labels:
            continue

        box2 = other["xywh"]
        ratio, _ = opencv_tool.iou_3(box, box2)
        # Negative ratio: iou_3 reports no valid overlap — presumably; confirm
        # against opencv_tool's contract.
        if ratio < 0:
            continue

        # Heavy overlap and the candidate is the smaller box: suppress it.
        if ratio > 0.4 and box[2] * box[3] < box2[2] * box2[3]:
            return 0

def remove_small_box(line, order, img_shape, filter_box_min_size):
    """Trim undersized boxes from one end of a track.

    Walks from the front (order == 0) or the back (order == 1), nulling out
    boxes whose width or height is below the per-label minimum (scaled from
    a 1920x1080 reference to the actual image size), and stops at the first
    box that passes the check.  Mutates ``line`` in place.

    Args:
        line: per-frame list of attribute dicts (or None).
        order: 0 = scan forward from frame 0; 1 = scan backward from the end.
        img_shape: (height, width) of the video frames.
        filter_box_min_size: label -> minimum side length at 1920x1080.
    """
    forward = (order == 0)
    step = 1 if forward else -1
    idx = 0 if forward else len(line) - 1

    done = False
    while not done and 0 <= idx < len(line):
        entry = line[idx]
        if entry is not None:
            min_w = filter_box_min_size[entry["label"]] * img_shape[1] / 1920
            min_h = filter_box_min_size[entry["label"]] * img_shape[0] / 1080
            if entry["xywh"][2] < min_w or entry["xywh"][3] < min_h:
                line[idx] = None
            else:
                done = True
        idx += step

def remove_border_box(line, order, img_width, filter_labels):
    """Trim boxes at one end of a track while the object is entering or
    leaving the frame through the left/right image border.

    Phase 1 scans from the front (order == 0) or the back (order == 1).
    Once a box has touched a border (within 10 px), the first later box
    that is clear of that border supplies a reference "normal" width.
    Phase 2 then walks BACK toward the end it started from (note the
    ``index -= step``), removing boxes narrower than half the reference
    width — i.e. boxes truncated by the image border.

    Args:
        line: per-frame list of attribute dicts (or None); mutated in place.
        order: 0 = forward scan from frame 0; 1 = backward scan from the end.
        img_width: frame width in pixels; a 10 px margin defines "at the border".
        filter_labels: only boxes whose label is a key here are considered
            in phase 1 (phase 2 removes regardless of label).
    """
    index = 0 if order == 0 else (len(line)-1)
    step = 1 if order == 0 else -1

    normal_width = 0
    right_in = False
    right_out = False
    left_in = False
    left_out = False
    # Phase 1: find the border-contact flag and the reference width.
    while index < len(line) and index >= 0:
        if line[index] == None or line[index]["label"] not in filter_labels:
            index+=step
            continue

        # Entering from the right border
        if order == 0 and line[index]["xywh"][0] + line[index]["xywh"][2] >= img_width-10:
            right_in = True
        # Leaving through the right border
        elif order == 1 and line[index]["xywh"][0] + line[index]["xywh"][2] >= img_width-10:
            right_out = True
        # Entering from the left border
        elif order == 0 and line[index]["xywh"][0] <= 10:
            left_in = True
        # Leaving through the left border
        elif order == 1 and line[index]["xywh"][0] <= 10:
            left_out = True

        # First box clear of the touched border: record its width and stop.
        if right_in and line[index]["xywh"][0] + line[index]["xywh"][2] < img_width-10:
            normal_width = line[index]["xywh"][2]
            break
        elif right_out and line[index]["xywh"][0] + line[index]["xywh"][2] < img_width-10:
            normal_width = line[index]["xywh"][2]
            break
        elif left_in and line[index]["xywh"][0] > 10:
            normal_width = line[index]["xywh"][2]
            break
        elif left_out and line[index]["xywh"][0] > 10:
            normal_width = line[index]["xywh"][2]
            break
        index+=step

    if normal_width > 0:
        # print("normal_width:", normal_width)
        # Phase 2: walk back toward the starting end (direction reversed via
        # -step), dropping boxes at most half the reference width.
        # while (order == 0 and index > 0) or (order == 1 and index < len(line)-1):
        while index < len(line) and index >= 0:
            if line[index] == None:
                index-=step
                continue
            label = line[index]["label"]
            if line[index]["xywh"][2] <= normal_width/2:
                # line[index]["label"] = "remove"
                line[index] = None
            index-=step


def remove_error_ratio_box(line, order, filter_labels):
    """Remove the leading (order == 0) or trailing (order == 1) run of
    boxes whose aspect ratio h/w exceeds the per-label limit.

    Boxes with labels not listed in ``filter_labels`` are skipped without
    ending the run.  Once at least one box has been removed, the first
    acceptable box of a filtered label stops the scan.  Mutates ``line``
    in place.

    Args:
        line: per-frame list of attribute dicts (or None).
        order: 0 = scan forward from frame 0; 1 = scan backward from the end.
        filter_labels: label -> maximum allowed h/w ratio.
    """
    step = 1 if order == 0 else -1
    idx = 0 if order == 0 else len(line) - 1

    removed_any = None
    while 0 <= idx < len(line):
        entry = line[idx]
        if entry is None or entry["label"] not in filter_labels:
            idx += step
            continue

        if entry["xywh"][3] / entry["xywh"][2] > filter_labels[entry["label"]]:
            removed_any = True
            line[idx] = None
        elif removed_any:
            # A normally-shaped box after removals: the bad run is over.
            break

        idx += step

def remove_error_ratio_box2(line, order, filter_labels):
    """Null out EVERY box in the track whose h/w ratio exceeds the
    per-label limit in ``filter_labels``.

    Unlike remove_error_ratio_box this always scans the whole track
    front-to-back; ``order`` is accepted for signature parity with its
    sibling but has no effect.  Mutates ``line`` in place.
    """
    for idx, entry in enumerate(line):
        if entry is None:
            continue
        if entry["label"] not in filter_labels:
            continue
        if entry["xywh"][3] / entry["xywh"][2] > filter_labels[entry["label"]]:
            line[idx] = None

def filter_box(data_struct, first_img_path):
    """Apply all per-track cleanup passes to data_struct, in place.

    Per track (line): trim too-small boxes at both ends, trim boxes cut by
    the image border at both ends, remove distorted-aspect-ratio boxes
    everywhere; then suppress any box shadowed by a bigger overlapping box
    of a related label on the same frame.

    Args:
        data_struct: output of store_to_struct; mutated in place.
        first_img_path: path of a sample frame used only to read the image
            size for scaling the minimum-size thresholds.
            NOTE(review): if the image is missing, cv2.imread returns None
            and the .shape access below raises AttributeError — confirm the
            caller guarantees the file exists.
    """
    # frame_cnt = len(data_struct[0])
    img = cv2.imread(first_img_path)
    img_shape = img.shape[:2]
    print("img_shape:", img_shape)

    # Minimum box side per label, expressed at 1920x1080 reference size.
    filter_box_min_size = {
        "open-tricycle": 24,
        "closed-tricycle": 24,
        "car": 24,
        "van": 26,
        "forklift": 26,

        "bus": 42,
        "truck": 42
    }
    # Labels that may suppress each other in the overlap pass: small
    # vehicles compete with small vehicles, large with large.
    filter_dict = {
        "closed-tricycle": ["car", "van", "forklift", "open-tricycle", "closed-tricycle"],
        "open-tricycle": ["car", "van", "forklift", "open-tricycle", "closed-tricycle"],
        "forklift": ["car", "van", "forklift", "open-tricycle", "closed-tricycle"],
        "car": ["car", "van", "forklift", "open-tricycle", "closed-tricycle"],
        "van": ["car", "van", "forklift", "open-tricycle", "closed-tricycle"],
        "truck": ["truck", "bus"],
        "bus": ["truck", "bus"],
    }
    # Maximum h/w ratio per label for the aspect-ratio pass.
    filter_dict2 = {
        "open-tricycle": 2.0,
        "closed-tricycle": 2.0,
        "car": 2.0,
        "van": 2.0,
        "forklift": 2.0,
        "bus": 3.3,
        "truck": 3.3
    }
    # Labels considered by the border-trimming pass (values unused there,
    # only key membership is checked).
    filter_border_dict = {
        "open-tricycle": 1.5,
        "closed-tricycle": 1.5,
        "car": 1.5,
        "van": 1.5,
        "forklift": 1.5,
        "bus": 3.3,
        "truck": 3.3
    }
    for car_idx, line in enumerate(data_struct):
        # Trim undersized boxes from both ends of the sequence.
        remove_small_box(line, 0, img_shape, filter_box_min_size)
        remove_small_box(line, 1, img_shape, filter_box_min_size)

        # Trim boxes cut off by the image border at both ends.
        remove_border_box(line, 0, img_shape[1], filter_border_dict)
        remove_border_box(line, 1, img_shape[1], filter_border_dict)

        # Remove distorted boxes, e.g. a "car" box taller than its width
        # allows (h > ratio * w).
        # remove_error_ratio_box(line, 0, filter_dict2)
        # remove_error_ratio_box(line, 1, filter_dict2)
        remove_error_ratio_box2(line, 0, filter_dict2)

        # Overlap pass: drop a box when a larger related-label box covers it.
        # Note: this reads data_struct while earlier lines are already
        # being nulled, so processing order affects the outcome.
        for frm_idx, info in enumerate(line):
            if info is None: continue
            if info["label"] == "forklift": print("forklift", first_img_path)

            if info["label"] in filter_dict and filter_box_imp(data_struct, car_idx, frm_idx, filter_dict[info["label"]]) == 0:
                data_struct[car_idx][frm_idx] = None
                # data_struct[car_idx][frm_idx]["label"] = "remove"



def xml_to_via_imp(xml_path):
    """Convert one CVAT XML annotation file into a VIA JSON placed next to
    the corresponding extracted video frames.

    The frame directory mirrors the annotation tree with "-annotation/"
    replaced by "-video/", and frames are named "<stem>.mp4_<idx>.jpg".
    """
    print(xml_path)
    base = osp.basename(xml_path).split(".xml")[0]
    frame_dir = osp.join(
        osp.dirname(xml_path).replace("-annotation/", "-video/"), base)
    frame_fmt = osp.join(frame_dir, base + ".mp4_{}.jpg")

    # Labels ignored entirely during conversion.  (Original had a bare
    # "van" comment here — presumably "van" was once considered; confirm.)
    skip_labels = ["person", "bicycle", "motorcycle", "large-block", "small-block"]

    tracks = store_to_struct(xml_path, skip_labels)
    # Frame 2 serves as the reference image for size-based filtering.
    filter_box(tracks, frame_fmt.format(2))
    save_to_via(tracks, frame_dir, frame_fmt)

def xml_to_via(xml_path):
    """Convert one XML file — or every .xml/.XML under a directory — to
    VIA JSON, serially.  (A multiprocessing Pool variant existed here but
    was disabled.)"""
    if not os.path.isdir(xml_path):
        targets = [xml_path]
    else:
        targets = filesystem.get_all_filepath(xml_path, [".xml", ".XML"])

    for target in targets:
        xml_to_via_imp(target)

def convert_cvat_xml_v10_to_v11(xml_path, save_dir):
    """Upgrade a CVAT annotation dump from schema v1.0 to v1.1 and write
    it to save_dir under the same file name.

    v1.1 additions handled here: an <annotations><version>1.1</version>
    header, a track-level source="manual" attribute, and per-box
    z_order / keyframe / outside attributes — the last box of each track
    is marked outside="1", all others outside="0".

    Fixes vs the original: ET.parse(path) instead of parsing an open()
    handle that was never closed, and the inner loop variable no longer
    shadows the outer track index.
    """
    os.makedirs(save_dir, exist_ok=True)
    E = objectify.ElementMaker(annotate=False)
    anno_tree = E.annotations(
        E.version("1.1")
    )

    tree = ET.parse(xml_path)
    root = tree.getroot()
    for track in root.findall('track'):
        boxs = track.findall('box')
        track.attrib["source"] = "manual"
        xml_track = E.track("", track.attrib)

        for box_idx, box in enumerate(boxs):
            box.attrib["z_order"] = "0"
            box.attrib["keyframe"] = "1"
            # v1.1 marks the final box of a track as having left the frame.
            box.attrib["outside"] = "1" if box_idx == len(boxs) - 1 else "0"

            xml_track.append(
                E.box("", box.attrib)
            )
        anno_tree.append(xml_track)

    save_path = osp.join(save_dir, osp.basename(xml_path))
    etree.ElementTree(anno_tree).write(save_path, pretty_print=True)





def vbb2xml(vbb_inputdir="/home/zhou/数据集/行人检测/Caltech_Pedestrian_Detection_Benchmark/data/annotations",
            vbb_outputdir="/home/zhou/数据集/行人检测/Caltech_Pedestrian_Detection_Benchmark/Annotations"):
    """Convert Caltech .vbb annotations to per-frame VOC XML files.

    The paths were hard-coded machine-specific locations; they are now
    keyword parameters defaulting to the original values, so the existing
    zero-argument call keeps working while other machines can pass their
    own directories.

    Args:
        vbb_inputdir: root directory containing set00/, set01/, ... with .vbb files.
        vbb_outputdir: directory the generated XML files are written into.
    """
    parse_anno_file(vbb_inputdir, vbb_outputdir)


if __name__ == '__main__':
    """
Computer Vision Annotation Tool
CVAT is completely re-designed and re-implemented version of Video Annotation Tool from Irvine, California tool. It is free, online, interactive video and image annotation tool for computer vision. It is being used by our team to annotate million of objects with different properties. Many UI and UX decisions are based on feedbacks from professional data annotation team.

Server version: 2.7.6

Core version: 12.0.0

Canvas version: 2.18.0

UI version: 1.58.0
    """
    # Previously-used entry points, kept for reference:
    # seq2img()

    # vbb2xml()

    # filter_xml()

    # last()


    # xml_path = r"F:\work\dataset\video\road_vehicle\training-annotation\0001\0b37ad02fe8b7147857325f6ed5eb285.xml"
    # save_dir = r"F:\work\dataset\video\road_vehicle\training-annotation\v11"
    # convert_cvat_xml_v10_to_v11(xml_path, save_dir)

    # xml_path = "/home/sunjie/work/dataset/det/vihicle/training-annotation/0001/0b37ad02fe8b7147857325f6ed5eb285.xml"
    # Current entry point: argv[1] is a CVAT XML file (or a directory of
    # them) to convert to VIA JSON.
    xml_path = sys.argv[1]
    xml_to_via(xml_path)
