
import argparse
import os
import json
import cv2
from tqdm import tqdm
import numpy as np
import cv2


def find_contours(*args, **kwargs):
    """Version-agnostic wrapper around ``cv2.findContours``.

    OpenCV 3 returns ``(image, contours, hierarchy)`` while OpenCV 4
    returns ``(contours, hierarchy)``; this wrapper hides that difference
    and always yields the two-tuple form.

    Returns:
        contours, hierarchy

    Raises:
        AssertionError: if the installed cv2 is neither version 3 nor 4.
    """
    version = cv2.__version__
    if version.startswith('4'):
        return cv2.findContours(*args, **kwargs)
    if version.startswith('3'):
        _image, contours, hierarchy = cv2.findContours(*args, **kwargs)
        return contours, hierarchy
    raise AssertionError(
        'cv2 must be either version 3 or 4 to call this method')


def main():
    """Convert VisDrone-style MOT ground truth to per-image spire JSON files.

    For every sequence directory under ``--mot-anno`` this reads
    ``<seq>/gt/gt.txt``, groups the annotation rows by frame index, and for
    each annotated frame writes ``<seq>_<frame>.jpg.json`` into
    ``--output-dir/<seq>/annotations`` plus a byte-for-byte copy of the frame
    image into ``--output-dir/<seq>/scaled_images`` (no rescaling is done).
    """
    parser = argparse.ArgumentParser(description="Convert VisDrone annotation to spire annotation")
    parser.add_argument(
        "--mot-anno",
        default=r"D:\dataset\2024-06-05-ARMOT\test",
        help="path to VisDrone MOT annotation file",
    )
    parser.add_argument(
        "--output-dir",
        default=r"D:\dataset\Spire-ARMOT-test-v240614",
        help="path to spire home dir",
    )
    args = parser.parse_args()

    # Index -> human-readable category name; this dataset only uses 0/1.
    category_id_to_name = ['null', 'drone']

    for sub_dir in os.listdir(args.mot_anno):
        img_dir = os.path.join(args.mot_anno, sub_dir, 'img1')
        gt_txt = os.path.join(args.mot_anno, sub_dir, 'gt', 'gt.txt')
        anno_frame_index = _load_gt(gt_txt)

        # Create the output layout once per sequence (was re-checked per frame).
        scaled_images = os.path.join(args.output_dir, sub_dir, 'scaled_images')
        annotations = os.path.join(args.output_dir, sub_dir, 'annotations')
        os.makedirs(annotations, exist_ok=True)
        os.makedirs(scaled_images, exist_ok=True)

        for fi, rows in anno_frame_index.items():
            image_name = str(fi).zfill(6) + '.jpg'
            image_fn = os.path.join(img_dir, image_name)
            image = cv2.imread(image_fn)
            if image is None:
                # cv2.imread silently returns None on a missing/unreadable
                # file; fail loudly instead of crashing on image.shape.
                raise FileNotFoundError('cannot read image: %s' % image_fn)

            # JSON dictionary describing a single image and its annotations.
            spire_dict = {
                'file_name': sub_dir + '_' + image_name,
                'height': image.shape[0],
                'width': image.shape[1],
                'annos': [_row_to_anno(row, category_id_to_name)
                          for row in rows],
            }

            # One JSON annotation file per image.
            output_fn = os.path.join(annotations, sub_dir + '_' + image_name + '.json')
            with open(output_fn, "w") as f:
                json.dump(spire_dict, f)

            # Copy the source frame next to its annotation; 'with' guarantees
            # both handles are closed (the original leaked them).
            dst_fn = os.path.join(scaled_images, sub_dir + '_' + image_name)
            with open(image_fn, 'rb') as src, open(dst_fn, 'wb') as dst:
                dst.write(src.read())


def _load_gt(gt_txt):
    """Parse a MOT gt.txt into ``{frame_index: [row, ...]}``.

    Each line is
    <frame_index>,<target_id>,<bbox_left>,<bbox_top>,<bbox_width>,
    <bbox_height>,<score>,<object_category>,<truncation>,<occlusion>
    where <object_category> follows the VisDrone convention: ignored
    regions(0), pedestrian(1), people(2), bicycle(3), car(4), van(5),
    truck(6), tricycle(7), awning-tricycle(8), bus(9), motor(10), others(11).

    The frame index is returned as int; all remaining fields as floats.
    """
    anno_frame_index = {}
    with open(gt_txt, 'r') as f:  # close the handle (original leaked it)
        for line in f:
            fields = line.split(',')
            fi = int(fields[0])
            anno_frame_index.setdefault(fi, []).append(
                [float(x) for x in fields[1:]])
    return anno_frame_index


def _row_to_anno(row, category_id_to_name):
    """Convert one gt.txt row (frame index already stripped) to a spire anno dict."""
    target_id = int(row[0])
    bbox_left, bbox_top, bbox_width, bbox_height = row[1], row[2], row[3], row[4]
    # NOTE(review): per the documented line format, row[5] is <score> and
    # row[6] is <object_category>. The original converter indexed [5]/[6]/[7]
    # for category/truncation/occlusion; that mapping is preserved verbatim
    # here — confirm against the actual dataset before "fixing" it.
    object_category, truncation, occlusion = int(row[5]), int(row[6]), int(row[7])
    return {
        'tracked_id': target_id,
        'area': bbox_width * bbox_height,
        'bbox': [bbox_left, bbox_top, bbox_width, bbox_height],
        'truncation': truncation,
        'occlusion': occlusion,
        'category_name': category_id_to_name[object_category],
    }


# Run the conversion only when executed as a script (not on import).
if __name__ == '__main__':
    main()
