from argparse import ArgumentParser
import os
import glob
import numpy as np
import cv2
import time
from tqdm import tqdm
import json

import sys
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree
import codecs
XML_EXT = '.xml'
ENCODE_METHOD = 'utf-8'

from mmdet.apis import inference_detector, init_detector


class PascalVocWriter:
    """Builds and saves a Pascal VOC style XML annotation for one image."""

    def __init__(self, foldername, filename, imgSize, databaseSrc='Unknown', localImgPath=None):
        """
        Args:
            foldername (str): value for the <folder> element.
            filename (str): image base filename; also used for the default save path.
            imgSize (tuple): image size as (H, W) or (H, W, C).
            databaseSrc (str): value for the <source>/<database> element.
            localImgPath (str, optional): image path for the <path> element.
        """
        self.foldername = foldername
        self.filename = filename
        self.databaseSrc = databaseSrc
        self.imgSize = imgSize
        self.boxlist = []
        self.localImgPath = localImgPath
        self.verified = False

    def prettify(self, elem):
        """Return a pretty-printed XML byte string for the Element.

        Round-trips through lxml because minidom does not support UTF-8.
        """
        rough_string = ElementTree.tostring(elem, 'utf8')
        root = etree.fromstring(rough_string)
        # lxml indents with two spaces; convert indentation to tabs
        return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace("  ".encode(), "\t".encode())

    def genXML(self):
        """Build and return the XML root <annotation> element.

        Returns:
            Element or None: None when filename, foldername or imgSize is missing.
        """
        # Required fields must all be present before building the tree
        if self.filename is None or \
                self.foldername is None or \
                self.imgSize is None:
            return None

        top = Element('annotation')
        if self.verified:
            top.set('verified', 'yes')

        folder = SubElement(top, 'folder')
        folder.text = self.foldername

        filename = SubElement(top, 'filename')
        filename.text = self.filename

        if self.localImgPath is not None:
            localImgPath = SubElement(top, 'path')
            localImgPath.text = self.localImgPath

        source = SubElement(top, 'source')
        database = SubElement(source, 'database')
        database.text = self.databaseSrc

        size_part = SubElement(top, 'size')
        width = SubElement(size_part, 'width')
        height = SubElement(size_part, 'height')
        depth = SubElement(size_part, 'depth')
        # imgSize is (H, W[, C]); VOC stores width first
        width.text = str(self.imgSize[1])
        height.text = str(self.imgSize[0])
        if len(self.imgSize) == 3:
            depth.text = str(self.imgSize[2])
        else:
            depth.text = '1'  # grayscale / unknown channel count

        segmented = SubElement(top, 'segmented')
        segmented.text = '0'
        return top

    def addBndBox(self, xmin, ymin, xmax, ymax, name, difficult):
        """Queue one bounding box (pixel coords) to be written by appendObjects()."""
        bndbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
        bndbox['name'] = name
        bndbox['difficult'] = difficult
        self.boxlist.append(bndbox)

    def appendObjects(self, top):
        """Append one <object> element per queued box to the given root element."""
        for each_object in self.boxlist:
            object_item = SubElement(top, 'object')
            name = SubElement(object_item, 'name')
            name.text = each_object['name']
            pose = SubElement(object_item, 'pose')
            pose.text = "Unspecified"
            truncated = SubElement(object_item, 'truncated')
            # A box touching the image border is marked truncated
            if int(float(each_object['ymax'])) == int(float(self.imgSize[0])) or (int(float(each_object['ymin']))== 1):
                truncated.text = "1" # max == height or min
            elif (int(float(each_object['xmax']))==int(float(self.imgSize[1]))) or (int(float(each_object['xmin']))== 1):
                truncated.text = "1" # max == width or min
            else:
                truncated.text = "0"
            difficult = SubElement(object_item, 'difficult')
            # Normalizes any truthy value to '1', falsy to '0'
            difficult.text = str( bool(each_object['difficult']) & 1 )
            bndbox = SubElement(object_item, 'bndbox')
            xmin = SubElement(bndbox, 'xmin')
            xmin.text = str(each_object['xmin'])
            ymin = SubElement(bndbox, 'ymin')
            ymin.text = str(each_object['ymin'])
            xmax = SubElement(bndbox, 'xmax')
            xmax.text = str(each_object['xmax'])
            ymax = SubElement(bndbox, 'ymax')
            ymax.text = str(each_object['ymax'])

    def save(self, targetFile=None):
        """Write the full annotation to targetFile (or '<filename>.xml' by default).

        Fix: use a context manager so the file handle is closed even if
        writing raises, instead of manual open/close.
        """
        root = self.genXML()
        self.appendObjects(root)
        if targetFile is None:
            targetFile = self.filename + XML_EXT

        prettifyResult = self.prettify(root)
        with codecs.open(targetFile, 'w', encoding=ENCODE_METHOD) as out_file:
            out_file.write(prettifyResult.decode('utf8'))


def show_result_pyplot(model, img, result, score_thr=0.3):
    """Render the detection results onto the image and return it.

    Args:
        model (nn.Module): the loaded detector (possibly DataParallel-wrapped).
        img (str or np.ndarray): image filename or loaded image.
        result (tuple[list] or list): detection result, either (bbox, segm)
            or just bbox.
        score_thr (float): minimum score for a box/mask to be drawn.

    Returns:
        The image with detections drawn on it (not displayed).
    """
    # Unwrap a DataParallel-style wrapper to reach the underlying detector
    detector = model.module if hasattr(model, 'module') else model
    return detector.show_result(img, result, score_thr=score_thr, show=False)


def generate_xml_annotation(result, image_name, image_size, dataset_folder, categories_model, categories_annotation, score_thr):
    """Write a Pascal VOC XML annotation for one image's detection result.

    Args:
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        image_name (str): image base filename.
        image_size (tuple): (H,W) image size.
        dataset_folder (str): annotations parent folder path 'dataset'. e.g. dataset/Annotations/1.xml
        categories_model (dict): detector output name, indexed by label id.
        categories_annotation (tuple): category names that should be annotated.
        score_thr (float): detection confidence threshold.
    """
    pascal_writer = PascalVocWriter(foldername=dataset_folder, filename=image_name, imgSize=image_size)
    bbox_result = result[0] if isinstance(result, tuple) else result
    for label, bboxes in enumerate(bbox_result):
        category = categories_model[label]
        for bbox in bboxes:
            xmin, ymin, xmax, ymax, score = bbox
            # Keep only wanted categories above the confidence threshold
            if category in categories_annotation and score > score_thr:
                xmin, ymin, xmax, ymax = tuple(map(int, (xmin, ymin, xmax, ymax)))
                pascal_writer.addBndBox(xmin, ymin, xmax, ymax, category, 0)

    # Fix: str.replace('jpg', 'xml') would also rewrite 'jpg' occurring
    # elsewhere in the name (e.g. 'my_jpg_photo.jpg'); swap the extension only.
    base_name = os.path.splitext(image_name)[0]
    save_path = os.path.join(dataset_folder, 'Annotations', base_name + '.xml')
    pascal_writer.save(targetFile=save_path)


def get_polygons_from_bin_mask(bin_mask, min_area=100.0, epsilon_param=8e-4, pt_type=int, add_closept=False):
    """Extract simplified polygon outlines from a binary mask.

    Args:
        bin_mask: binary mask image accepted by cv2.findContours.
        min_area (float): contours with a smaller area are discarded.
        epsilon_param (float): approximation tolerance, as a fraction of the
            contour perimeter.
        pt_type (type): scalar type used for serialized coordinates.
        add_closept (bool): when True, repeat the first point at the end to
            mark the polygon as closed.

    Returns:
        list[list]: one list of [x, y] point pairs per kept contour.
    """
    contours, _ = cv2.findContours(bin_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)  # contours, hierarchy

    polygons = []
    for contour in contours:
        # Drop tiny contours (noise)
        if cv2.contourArea(contour) < min_area:
            continue
        tolerance = epsilon_param * cv2.arcLength(curve=contour, closed=True)
        approx = cv2.approxPolyDP(curve=contour, epsilon=tolerance, closed=True)

        # np.int64 coordinates are not JSON-serializable; convert each
        # point to plain pt_type values (np.ndarray --> list)
        points = [list(map(pt_type, pt[0])) for pt in approx]

        # Repeat the start point so consumers see an explicitly closed ring
        if add_closept:
            points.append(points[0])

        polygons.append(points)
    return polygons


def generate_labelme_annotation(result, image_name, image_size, dataset_folder, categories_model, categories_annotation, score_thr):
    """Write a labelme-format JSON annotation (mask polygons) for one image.

    Args:
        result (tuple[list] or list): detection result, (bbox, segm) or bbox only.
        image_name (str): image base filename.
        image_size (tuple): (H,W) image size.
        dataset_folder (str): annotations parent folder path 'dataset'.
        categories_model (dict): detector output name, indexed by label id.
        categories_annotation (tuple): category names that should be annotated.
        score_thr (float): detection confidence threshold.
    """
    json_dict = {
        "version": "4.0.0",
        "flags": {},
        "shapes": [],
        # NOTE(review): imagePath is left empty; labelme normally stores the
        # image filename here — confirm downstream tooling tolerates this.
        "imagePath": "",
        "imageData": None,
        "imageHeight": image_size[0],
        "imageWidth": image_size[1]
    }

    if isinstance(result, tuple):
        bbox_result, seg_result = result
    else:
        bbox_result, seg_result = result, None
    # TODO: add bbox annotations, now only has segmentation annotations
    # (a bbox-only result with non-empty classes will fail on seg_result[label])
    for label in range(len(bbox_result)):
        bboxes = bbox_result[label]
        segments = seg_result[label]
        for bbox, seg in zip(bboxes, segments):
            category = categories_model[label]
            xmin, ymin, xmax, ymax, score = bbox
            if category in categories_annotation and score > score_thr:
                polygons = get_polygons_from_bin_mask(seg)
                # Only the largest/first polygon of the mask is kept
                if len(polygons) > 0:
                    shape = {
                        "label": category,
                        "points": polygons[0],
                        "group_id": 0,
                        "shape_type": "polygon",
                        "flags": {}
                    }
                    json_dict['shapes'].append(shape)

    # Fix 1: str.replace('jpg', 'json') would corrupt names containing 'jpg'
    # elsewhere; swap the extension only.
    # Fix 2: use a context manager — json.dump(obj, open(...)) leaked the handle.
    base_name = os.path.splitext(image_name)[0]
    save_path = os.path.join(dataset_folder, 'Annotations', base_name + '.json')
    with open(save_path, 'w') as out_file:
        json.dump(json_dict, out_file)


def main():
    """CLI entry point: run detection on every jpg in <dataset>/Images and
    write VOC XML or labelme JSON annotations into <dataset>/Annotations."""
    parser = ArgumentParser(description="Auto Annotation by MMdetection.")
    # Fixed typo: the help string contained a full-width comma '，'
    parser.add_argument('format', help='annotation format, only support `voc` and `labelme`')
    parser.add_argument('dataset', help='dataset folder')
    parser.add_argument('--config', help='Config file for mmdetection', default='')
    parser.add_argument('--checkpoint', help='Checkpoint file for mmdetection', default='')
    parser.add_argument('--device', default='cuda:0', help='Device used for inference')
    parser.add_argument('--score-thr', type=float, default=0.5, help='bbox score threshold')
    parser.add_argument('--show', action='store_true', help='whether to show result')
    args = parser.parse_args()

    # raise instead of assert: asserts are stripped under `python -O`
    if args.format not in ('voc', 'labelme'):
        raise ValueError(f'unsupported format: {args.format}')
    images_dir = os.path.join(args.dataset, 'Images')
    if not os.path.exists(images_dir):
        raise ValueError('not exist Images folder')
    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    os.makedirs(os.path.join(args.dataset, 'Annotations'), exist_ok=True)

    model = init_detector(args.config, args.checkpoint, device=args.device)

    # Narrow exception handling (was a bare `except:` that hid the real cause)
    # and chain it so the underlying error is still visible.
    try:
        with open('categories.json') as f:
            json_data = json.load(f)
        categories_model = json_data['categories_model']
        categories_annotation = json_data['categories_annotation']
    except (OSError, ValueError, KeyError) as err:
        raise ValueError("error in categories.json") from err

    generate_annotation = (generate_xml_annotation if args.format == 'voc'
                           else generate_labelme_annotation)
    for image_path in tqdm(image_paths):
        image = cv2.imread(image_path)
        image_name = os.path.basename(image_path)
        H, W = image.shape[:2]
        result = inference_detector(model, image)
        # BUG FIX: the arguments were previously passed as
        # (categories_annotation, categories_model), swapped relative to the
        # annotation functions' (categories_model, categories_annotation)
        # signatures, so category lookup/filtering used the wrong objects.
        generate_annotation(
            result, image_name, (H, W), args.dataset,
            categories_model, categories_annotation, args.score_thr)

        if args.show:
            # Display the rendered result; press ESC to stop early
            show_image = show_result_pyplot(model, image, result, score_thr=args.score_thr)
            cv2.namedWindow('Result', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Result', show_image)
            if cv2.waitKey(1) == 27:
                break


if __name__ == '__main__':
    main()
