# -*- coding:utf-8 -*-
import os, sys
import os.path as osp
import fnmatch
import shutil
from tqdm import tqdm
import os, glob, json, copy
from scipy.io import loadmat
from collections import defaultdict
from multiprocessing import Pool
import numpy as np
import cv2
from lxml import etree, objectify
import xml.etree.ElementTree as ET

from tool import filesystem, via_tool, opencv_tool, darknet_tool



def instance2xml_base(anno, bbox_type='xyxy'):
    """Build a VOC-style lxml annotation tree for one Caltech pedestrian frame.

    Parameters
    ----------
    anno : dict
        Expects keys 'id' (filename), 'label', 'bbox' (list of 4-number
        boxes) and 'occlusion' (one flag per box).
    bbox_type : str
        'xyxy' or 'xywh'.  NOTE(review): the branch semantics are inverted
        relative to what the names suggest — the 'xyxy' path actually
        receives (xmin, ymin, width, height) and converts it to corner
        coordinates, while the 'xywh' path expects (xmin, ymin, xmax, ymax)
        as-is.  Behavior is deliberately preserved because the caller in
        this file relies on the default 'xyxy' path with (x, y, w, h)
        boxes from the vbb data.

    Returns the root <annotation> element; image size is hard-coded to
    640x480x3 (Caltech pedestrian frame size).
    """
    assert bbox_type in ['xyxy', 'xywh']
    E = objectify.ElementMaker(annotate=False)
    anno_tree = E.annotation(
        E.folder('VOC2014_instance/person'),
        E.filename(anno['id']),
        E.source(
            E.database('Caltech pedestrian'),
            E.annotation('Caltech pedestrian'),
            E.image('Caltech pedestrian'),
            E.url('None')
        ),
        E.size(
            E.width(640),
            E.height(480),
            E.depth(3)
        ),
        E.segmented(0),
    )
    for index, bbox in enumerate(anno['bbox']):
        bbox = [float(x) for x in bbox]
        if bbox_type == 'xyxy':
            # despite the name, this path receives (x, y, w, h) — see docstring
            xmin, ymin, w, h = bbox
            xmax = xmin + w
            ymax = ymin + h
        else:
            xmin, ymin, xmax, ymax = bbox
        # reuse the ElementMaker built above (the old code rebuilt it
        # on every iteration for no benefit)
        anno_tree.append(
            E.object(
                E.name(anno['label']),
                E.bndbox(
                    E.xmin(int(xmin)),
                    E.ymin(int(ymin)),
                    E.xmax(int(xmax)),
                    E.ymax(int(ymax))
                ),
                E.difficult(0),
                E.occlusion(anno["occlusion"][index])
            )
        )
    return anno_tree


def parse_anno_file(vbb_inputdir, vbb_outputdir):
    """Walk the Caltech vbb annotation tree and emit one VOC-style xml per frame.

    vbb_inputdir: directory containing set00, set01, ... sub-directories
        of .vbb annotation files.
    vbb_outputdir: flat output directory for the generated .xml files
        (created on demand).

    NOTE(review): relies on vbb_anno2dict(), which is not defined in this
    file — confirm it is provided elsewhere before running.
    """
    # annotation sub-directories in hda annotation input directory
    assert os.path.exists(vbb_inputdir)
    sub_dirs = os.listdir(vbb_inputdir)     # corresponds to set00, set01, ...
    for sub_dir in sub_dirs:
        print("Parsing annotations of camera: ", sub_dir)
        cam_id = sub_dir
        # collect every vbb file under this set directory
        vbb_files = glob.glob(os.path.join(vbb_inputdir, sub_dir, "*.vbb"))
        for vbb_file in vbb_files:
            # per-frame annotation dicts extracted from one vbb file
            annos = vbb_anno2dict(vbb_file, cam_id)

            if annos:
                # destination folder for the xml files,
                # e.g. "/Users/chenguanghao/Desktop/Caltech/xmlresult/"
                vbb_outdir = vbb_outputdir

                # create vbb_outdir if it does not exist yet
                if not os.path.exists(vbb_outdir):
                    os.makedirs(vbb_outdir)

                for filename, anno in sorted(annos.items(), key=lambda x: x[0]):
                    if "bbox" in anno:
                        anno_tree = instance2xml_base(anno)
                        outfile = os.path.join(vbb_outdir, os.path.splitext(filename)[0]+".xml")
                        print("Generating annotation xml file of picture: ", filename)
                        # write the final xml file, one per image
                        etree.ElementTree(anno_tree).write(outfile, pretty_print=True)



def store_to_struct(xml_path, disable_label):
    """Parse a CVAT annotation xml into a frame-indexed table of boxes.

    xml_path: CVAT xml containing <track> elements with <box> children.
    disable_label: collection of track labels to skip entirely.

    Returns a defaultdict(list) mapping frame number -> list of box
    attribute dicts; each dict keeps the raw CVAT box attributes plus
    "label", "id" and "xywh" = [x, y, w, h] as truncated ints.
    Boxes flagged outside="1" (left the frame) are dropped.
    """
    # parse via the path so the file handle is closed deterministically
    # (the old code passed an open() handle that was never closed)
    tree = ET.parse(xml_path)
    root = tree.getroot()

    data_struct = defaultdict(list)
    for track in root.findall('track'):
        label = track.attrib["label"]
        if label in disable_label:
            continue
        id_ = track.attrib["id"]

        for box in track.findall('box'):
            attr_dict = box.attrib
            frame_cnt = int(attr_dict["frame"])
            if attr_dict["outside"] == "1":
                continue

            attr_dict["label"] = label
            attr_dict["id"] = id_

            # float() instead of eval(): identical numeric result for the
            # decimal strings CVAT emits, without executing attribute text
            x = int(float(attr_dict["xtl"]))
            y = int(float(attr_dict["ytl"]))
            w = int(float(attr_dict["xbr"])) - x
            h = int(float(attr_dict["ybr"])) - y
            attr_dict["xywh"] = [x, y, w, h]

            data_struct[frame_cnt].append(attr_dict)

    return data_struct


def check_iou(regions, region):
    """Return True when `region` should NOT be appended to `regions`.

    A candidate is rejected when it overlaps an existing entry with an
    IoU of at least 0.6 and either the existing entry is a tracked box
    (4-value "xywh") or the existing entry is a scored detection
    (5-value "xywh") with a lower score — in the latter case the weaker
    entry is replaced in place by the candidate.
    """
    for i, existing in enumerate(regions):
        overlap, _ = opencv_tool.iou_3(existing["xywh"], region["xywh"])
        if overlap >= 0.6:
            # a tracked (4-value) box always beats a scored (5-value) detection
            if len(existing["xywh"]) == 4 and len(region["xywh"]) == 5:
                return True
            # both scored: keep whichever detection has the higher score
            if (len(existing["xywh"]) == 5 and len(region["xywh"]) == 5
                    and existing["xywh"][4] < region["xywh"][4]):
                regions[i] = region
                return True
    return False


def merge_box(data_struct, via_path):
    """Merge detector regions from a VIA json into the tracked-box table.

    data_struct: defaultdict(frame index -> list of box dicts), as built
        by store_to_struct; modified in place.
    via_path: VIA json of detector results; regions scoring below 0.5
        are discarded.

    VIA entries are visited in filename order and the n-th filename is
    matched to frame index n of data_struct.  Each surviving region is
    appended unless check_iou() rejects it (or replaces a weaker entry).
    """
    with open(via_path, "r") as rf:
        # json.load over json.loads(rf.read()) — same result, standard idiom
        data_via = json.load(rf)

    sorted_idx = []
    region_data = []
    for idx, (k, v) in enumerate(data_via.items()):
        sorted_idx.append([v["filename"], idx])
        cur_regions = []
        for region in v["regions"]:
            score = float(region["region_attributes"]["score"])
            # confidence cut-off for detector boxes
            if score < 0.5:
                continue
            shape = region["shape_attributes"]
            cur_regions.append({
                "label": region["region_attributes"]["label"],
                # 5-element xywh ([x, y, w, h, score]) marks a detector box,
                # distinguishing it from 4-element tracked boxes in check_iou
                "xywh": [shape["x"], shape["y"], shape["width"], shape["height"], score],
            })
        region_data.append(cur_regions)

    sorted_idx = sorted(sorted_idx, key=lambda x: x[0])
    for frame_idx, (filename, ori_idx) in enumerate(sorted_idx):
        for region in region_data[ori_idx]:
            # check_iou() returning True means this region is dropped/merged
            if check_iou(data_struct[frame_idx], region):
                continue
            data_struct[frame_idx].append(region)




def save_to_via(data_struct, img_dir, img_prefix_path, via_name="via_region_data.json"):
    """Dump a track table to a VIA json file via via_tool.

    NOTE(review): this definition is shadowed by the later save_to_via
    in this module (same name, different signature), so it is
    unreachable by name as the file stands.

    Expects data_struct as a list of tracks, each a per-frame list of
    attribute dicts with keys "xywh", "label", "occluded", "cut", "id"
    — a different layout from the frame-keyed dict built by
    store_to_struct; confirm the producer before reviving this code.
    """
    via_dict = {}
    # frame count taken from the first track's length
    frame_cnt = len(data_struct[0])

    for frm_idx in range(frame_cnt):
        boxs = []
        for car_idx, line in enumerate(data_struct):
            attr_dict = data_struct[car_idx][frm_idx]
            if attr_dict == None: continue
            box = attr_dict["xywh"]
            # append [key, value] attribute pairs after the geometry
            # (mutates the stored "xywh" list in place)
            box.append([["label", attr_dict["label"]], ["occluded", attr_dict["occluded"]], ["cut", attr_dict["cut"]], ["id", attr_dict["id"]]])
            boxs.append(box)

        img_path = img_prefix_path.format(frm_idx)
        name, item_dict = via_tool.gen_via_item_dict(img_path, "rect", boxs)
        via_dict[name] = item_dict

    with open(osp.join(img_dir, via_name), "w") as rf:
        rf.write(json.dumps(via_dict))

def filter_box_imp(data_struct, car_idx, frm_idx, filter_labels):
    """Decide whether the box at (car_idx, frm_idx) is swallowed by a bigger one.

    Returns 0 when some other track's box in the same frame (with a label
    in filter_labels) overlaps this box by more than 0.4 IoU and covers a
    larger area; returns None (implicitly) otherwise.
    """
    target = data_struct[car_idx][frm_idx]["xywh"]

    for other_idx, other_track in enumerate(data_struct):
        if other_idx == car_idx:
            continue
        other = other_track[frm_idx]
        if other is None or other["label"] not in filter_labels:
            continue

        other_box = other["xywh"]
        ratio, _ = opencv_tool.iou_3(target, other_box)
        if ratio < 0:
            continue

        # heavily overlapped by a strictly larger box -> mark for removal
        if ratio > 0.4 and target[2] * target[3] < other_box[2] * other_box[3]:
            return 0

def remove_small_box(line, order, img_shape, filter_box_min_size):
    """Null out undersized boxes at one end of a track, in place.

    line: one track, a per-frame list of box dicts (or None slots).
    order: 0 to sweep forward from the start, anything else to sweep
        backward from the end.
    img_shape: (height, width) of the frames.
    filter_box_min_size: label -> minimum box side in pixels at 1920x1080.

    The sweep stops at the first box that passes the (resolution-scaled)
    size threshold; None slots are skipped without stopping the sweep.
    """
    if order == 0:
        idx, step = 0, 1
    else:
        idx, step = len(line) - 1, -1

    while 0 <= idx < len(line):
        entry = line[idx]
        if entry is not None:
            min_px = filter_box_min_size[entry["label"]]
            # thresholds scale with image size relative to 1920x1080
            min_w = min_px * img_shape[1] / 1920
            min_h = min_px * img_shape[0] / 1080
            if entry["xywh"][2] < min_w or entry["xywh"][3] < min_h:
                line[idx] = None
            else:
                # first acceptable box ends the sweep
                break
        idx += step

def remove_border_box(line, order, img_width, filter_labels):
    """Null out boxes clipped by the left/right image border at one end of a track.

    First pass walks from one end (order 0: front, order 1: back) until it
    finds the first box that has moved clear of the border (within 10 px
    counts as "at the border"); that box's width becomes the "normal"
    width.  Second pass walks BACK the way it came (note index -= step)
    and nulls every box narrower than half the normal width.

    line: one track, a per-frame list of box dicts or None; mutated in place.
    order: 0 to scan forward, 1 to scan backward.
    img_width: image width in pixels.
    filter_labels: only boxes with these labels are considered in pass 1.
    """
    index = 0 if order == 0 else (len(line)-1)
    step = 1 if order == 0 else -1

    normal_width = 0
    right_in = False
    right_out = False
    left_in = False
    left_out = False
    while index < len(line) and index >= 0:
        if line[index] == None or line[index]["label"] not in filter_labels:
            index+=step
            continue

        # entering the frame from the right border
        if order == 0 and line[index]["xywh"][0] + line[index]["xywh"][2] >= img_width-10: 
            right_in = True
        # leaving the frame via the right border
        elif order == 1 and line[index]["xywh"][0] + line[index]["xywh"][2] >= img_width-10:
            right_out = True
        # entering the frame from the left border
        elif order == 0 and line[index]["xywh"][0] <= 10: 
            left_in = True
        # leaving the frame via the left border
        elif order == 1 and line[index]["xywh"][0] <= 10:
            left_out = True

        # stop at the first box clear of the border; remember its width
        if right_in and line[index]["xywh"][0] + line[index]["xywh"][2] < img_width-10:
            normal_width = line[index]["xywh"][2]
            break
        elif right_out and line[index]["xywh"][0] + line[index]["xywh"][2] < img_width-10:
            normal_width = line[index]["xywh"][2]
            break
        elif left_in and line[index]["xywh"][0] > 10:
            normal_width = line[index]["xywh"][2]
            break
        elif left_out and line[index]["xywh"][0] > 10:
            normal_width = line[index]["xywh"][2]
            break
        index+=step

    if normal_width > 0:
        # print("normal_width:", normal_width)
        # while (order == 0 and index > 0) or (order == 1 and index < len(line)-1):
        # second pass: walk back toward the border (step direction reversed),
        # nulling boxes squashed to less than half the normal width
        while index < len(line) and index >= 0:
            if line[index] == None:
                index-=step
                continue
            label = line[index]["label"]  # NOTE(review): assigned but unused
            if line[index]["xywh"][2] <= normal_width/2: 
                # line[index]["label"] = "remove"
                line[index] = None
            index-=step


def remove_error_ratio_box(line, order, filter_labels):
    """Null out abnormally tall boxes at one end of a track, in place.

    line: one track, a per-frame list of box dicts or None.
    order: 0 to sweep forward from the start, 1 to sweep backward.
    filter_labels: label -> maximum allowed height/width ratio; boxes
        with other labels are skipped without affecting the sweep.

    Once at least one box has been removed, the sweep stops at the first
    well-proportioned box it meets.
    """
    idx = 0 if order == 0 else len(line) - 1
    step = 1 if order == 0 else -1

    removed_any = False
    while 0 <= idx < len(line):
        entry = line[idx]
        if entry is not None and entry["label"] in filter_labels:
            width, height = entry["xywh"][2], entry["xywh"][3]
            if height / width > filter_labels[entry["label"]]:
                removed_any = True
                line[idx] = None
            elif removed_any:
                # first normal box after a removal ends the sweep
                break
        idx += step

def remove_error_ratio_box2(line, order, filter_labels):
    """Null out every box whose height/width ratio exceeds the per-label limit.

    Unlike remove_error_ratio_box this walks the whole track front to
    back and removes all offenders; the `order` parameter is accepted
    for signature parity with its sibling but is not used.

    line: one track, a per-frame list of box dicts or None; mutated in place.
    filter_labels: label -> maximum allowed height/width ratio; boxes
        with other labels are left untouched.
    """
    for idx, entry in enumerate(line):
        if entry is None or entry["label"] not in filter_labels:
            continue
        if entry["xywh"][3] / entry["xywh"][2] > filter_labels[entry["label"]]:
            line[idx] = None

def filter_box(data_struct, first_img_path):
    """Post-filter a track table in place using size, border and shape rules.

    data_struct: list of tracks (each a per-frame list of box dicts or
        None); mutated in place.
    first_img_path: path to one frame image, read only to obtain the
        image resolution for scaling the size thresholds.
    """
    img = cv2.imread(first_img_path)
    img_shape = img.shape[:2]
    print("img_shape:", img_shape)

    small_vehicles = ["car", "van", "forklift", "open-tricycle", "closed-tricycle"]
    large_vehicles = ["truck", "bus"]

    # minimum box side (pixels at 1920x1080) per label
    filter_box_min_size = {
        "open-tricycle": 24,
        "closed-tricycle": 24,
        "car": 24,
        "van": 26,
        "forklift": 26,
        "bus": 42,
        "truck": 42,
    }
    # which labels may suppress a given label when a bigger box overlaps it
    filter_dict = {label: small_vehicles for label in small_vehicles}
    filter_dict.update({label: large_vehicles for label in large_vehicles})
    # maximum height/width ratio per label
    filter_dict2 = {
        "open-tricycle": 2.0,
        "closed-tricycle": 2.0,
        "car": 2.0,
        "van": 2.0,
        "forklift": 2.0,
        "bus": 3.3,
        "truck": 3.3,
    }
    # labels considered by the border filter (values unused there)
    filter_border_dict = {
        "open-tricycle": 1.5,
        "closed-tricycle": 1.5,
        "car": 1.5,
        "van": 1.5,
        "forklift": 1.5,
        "bus": 3.3,
        "truck": 3.3,
    }

    for car_idx, track in enumerate(data_struct):
        # drop undersized boxes at both ends of the track
        remove_small_box(track, 0, img_shape, filter_box_min_size)
        remove_small_box(track, 1, img_shape, filter_box_min_size)

        # drop border-clipped boxes at both ends of the track
        remove_border_box(track, 0, img_shape[1], filter_border_dict)
        remove_border_box(track, 1, img_shape[1], filter_border_dict)

        # drop boxes with an abnormal h/w ratio, e.g. a "car" taller than 2x its width
        remove_error_ratio_box2(track, 0, filter_dict2)

        for frm_idx, info in enumerate(track):
            if info is None:
                continue
            if info["label"] == "forklift": print("forklift", first_img_path)

            # drop boxes swallowed by a bigger overlapping box of a related label
            if info["label"] in filter_dict and filter_box_imp(data_struct, car_idx, frm_idx, filter_dict[info["label"]]) == 0:
                data_struct[car_idx][frm_idx] = None




def convert_cvat_xml_v10_to_v11(xml_path, save_dir):
    """Convert a CVAT annotation xml into the v1.1 layout and save it.

    xml_path: source CVAT xml with <track>/<box> elements.
    save_dir: output directory (created if missing); the output keeps
        the source file's basename.

    Each track is marked source="manual"; every box gets z_order="0",
    keyframe="1" and outside="0", except the last box of a track, which
    is marked outside="1".
    """
    os.makedirs(save_dir, exist_ok=True)
    E = objectify.ElementMaker(annotate=False)
    anno_tree = E.annotations(
        E.version("1.1")
    )

    # parse directly from the path so the handle is closed promptly
    # (the old ET.parse(open(xml_path)) leaked the file object)
    tree = ET.parse(xml_path)
    root = tree.getroot()
    for track in root.findall('track'):
        boxs = track.findall('box')
        track.attrib["source"] = "manual"
        xml_track = E.track("", track.attrib)

        # box_idx renamed from idx: the old code shadowed the outer loop index
        for box_idx, box in enumerate(boxs):
            box.attrib["z_order"] = "0"
            box.attrib["keyframe"] = "1"
            # only the final box of a track is marked as leaving the frame
            box.attrib["outside"] = "1" if box_idx == len(boxs) - 1 else "0"

            xml_track.append(
                E.box("", box.attrib)
            )
        anno_tree.append(xml_track)

    save_path = osp.join(save_dir, osp.basename(xml_path))
    etree.ElementTree(anno_tree).write(save_path, pretty_print=True)



def get_all_label(data_dir):
    """Count how many tracks carry each label across all xml files under data_dir.

    data_dir: directory tree searched (via the project filesystem helper)
        for .xml CVAT annotation files.

    Side effect: writes the counts as "label.json" next to this script.
    """
    xml_paths = filesystem.get_all_filepath(data_dir, [".xml"])

    data_struct = defaultdict(int)
    for xml_path in xml_paths:
        # parse by path so each file handle is closed promptly
        # (the old open(xml_path) was never closed)
        root = ET.parse(xml_path).getroot()

        for track in root.findall('track'):
            data_struct[track.attrib["label"]] += 1

    save_dir = osp.dirname(osp.abspath(__file__))
    # wf renamed from rf: this handle is for writing
    with open(osp.join(save_dir, "label.json"), "w") as wf:
        wf.write(json.dumps(data_struct, ensure_ascii=False))

def save_to_via(data_struct, via_path, via_name="via_region_data.merge.json"):
    """Write merged regions back into a VIA json next to `via_path`.

    NOTE(review): this redefines the earlier save_to_via in this module
    (same name, different signature); only this version is reachable.

    data_struct: dict mapping a frame index (the position of the entry in
        the key-sorted VIA file, matching merge_box's ordering) to a list
        of region dicts with "xywh" ([x, y, w, h, ...]) and "label".
    via_path: existing VIA json whose entries serve as the template.
    via_name: output filename, written into via_path's directory.
    """
    with open(via_path, "r") as rf:
        data_via = json.load(rf)

    # stable order: VIA keys sorted lexicographically, matching merge_box
    data_list = sorted(data_via.items(), key=lambda kv: kv[0])

    for frame_idx, rgns in data_struct.items():
        regions = []
        for rg in rgns:
            x, y, w, h = rg["xywh"][:4]  # ignore a trailing score, if any
            regions.append({
                "shape_attributes": {
                    "name": "rect",
                    "x": x,
                    "y": y,
                    "width": w,
                    "height": h
                },
                "region_attributes": {
                    "label": rg["label"],
                }
            })
        data_list[frame_idx][1]["regions"] = regions

    img_dir = osp.dirname(via_path)
    # wf renamed from rf: this handle is for writing
    with open(osp.join(img_dir, via_name), "w") as wf:
        wf.write(json.dumps(dict(data_list)))


def merge_to_via(data_dir, filter_label):
    """For every CVAT xml under data_dir, merge detector boxes and save as VIA json.

    data_dir: directory tree searched for .xml annotation files.
    filter_label: track labels to drop when loading the xml.

    Files whose sibling images/via_region_data.rec.json is missing are
    reported and skipped.
    """
    for xml_path in filesystem.get_all_filepath(data_dir, [".xml"]):
        print(xml_path)
        # 1. load tracked boxes, skipping disabled labels
        data_struct = store_to_struct(xml_path, filter_label)

        # 2. merge in detector regions, if the VIA file exists
        via_path = osp.join(osp.dirname(xml_path), "images", "via_region_data.rec.json")
        if not osp.exists(via_path):
            print("not exists. ", via_path)
            continue
        merge_box(data_struct, via_path)

        # 3. write the merged result back out
        save_to_via(data_struct, via_path)


if __name__ == '__main__':
    # Step 1: count labels across all CVAT xml files (disabled).
    # data_dir = "/home/sunjie/work/code/python_script/test_data"
    # get_all_label(data_dir)

    # Step 2: merge detector boxes into the tracked annotations (disabled).
    # filter_label = ["机动车乱停放", "带蓬三轮车"]
    # data_dir = "/media/dataset/road_vihicle/images"
    # merge_to_via(data_dir, filter_label)

    # Step 3: generate darknet train/val file lists and sanity-check them.
    data_root = "/home/data/road_vihicle"
    via_name="via_region_data.merge.json" 
    gen_type="road_vihicle.merge"
    # darknet_tool.deal_many_dir_local(data_root, via_name, gen_type)
    darknet_tool.create_train_val_txt(data_root, ext=".jpg")
    darknet_tool.check_darket_train_data(data_root, ext=".jpg")




    # # xml_path = "/home/sunjie/work/dataset/det/vihicle/training-annotation/0001/0b37ad02fe8b7147857325f6ed5eb285.xml"
    # xml_path = sys.argv[1]
    # xml_to_via(xml_path)
