import hashlib
import os
import shutil
import xml.etree.ElementTree as ET

import tensorflow as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util

from dataset.data_cleaner import DatasetCleaner, DataItemCleaner
from dataset.data_splitor import DataSpliter
from data_analysis.data_analyzer import DataAnalyzer


def _dict_to_tf_example(xml_path, img_path, label_map_dict):
    """Convert a PASCAL-VOC style XML annotation and its image to a tf.Example.

    Bounding-box coordinates are normalized to [0, 1] using the image width and
    height read from the XML ``<size>`` element. Objects whose label is not in
    ``label_map_dict``, or whose normalized box is degenerate or out of bounds,
    are silently skipped.

    Args:
      xml_path: Path to the VOC-style XML annotation file.
      img_path: Path to the already-encoded JPEG image file.
      label_map_dict: Mapping from class-name string to integer label id.

    Returns:
      example: The converted tf.Example.

    Raises:
      IOError: If the image or XML file cannot be read.
    """
    with tf.gfile.GFile(img_path, 'rb') as fid:
        encoded_jpg = fid.read()

    # SHA-256 of the raw image bytes; lets downstream tooling detect duplicates.
    key = hashlib.sha256(encoded_jpg).hexdigest()

    root = ET.parse(xml_path).getroot()
    size = root.find('size')
    width = int(size.find('width').text)
    height = int(size.find('height').text)

    xmin, ymin, xmax, ymax = [], [], [], []
    classes, classes_text = [], []
    # NOTE: truncated/poses are intentionally written out as empty lists; the
    # source annotations are not expected to provide them.
    truncated, poses, difficult_obj = [], [], []

    for obj in root.iter('object'):
        name = obj.find('name').text
        if name not in label_map_dict:
            continue
        # <difficult> is optional in some annotation tools; treat a missing
        # element as "not difficult" instead of raising AttributeError (the
        # caller only skips on IOError, so a crash here aborts the whole run).
        difficult_node = obj.find('difficult')
        difficult = (difficult_node is not None
                     and bool(int(difficult_node.text)))
        xml_box = obj.find('bndbox')
        _xmin = float(xml_box.find('xmin').text) / width
        _xmax = float(xml_box.find('xmax').text) / width
        _ymin = float(xml_box.find('ymin').text) / height
        _ymax = float(xml_box.find('ymax').text) / height
        # Keep only boxes that lie inside the image and have positive area.
        if (0 <= _xmin < _xmax <= 1) and (0 <= _ymin < _ymax <= 1):
            xmin.append(_xmin)
            xmax.append(_xmax)
            ymin.append(_ymin)
            ymax.append(_ymax)
            difficult_obj.append(int(difficult))
            classes_text.append(name.encode('utf8'))
            classes.append(label_map_dict[name])

    example = tf.train.Example(
        features=tf.train.Features(feature={
            'image/height':
                dataset_util.int64_feature(height),
            'image/width':
                dataset_util.int64_feature(width),
            'image/filename':
                dataset_util.bytes_feature(
                    os.path.basename(xml_path).encode('utf8')),
            'image/source_id':
                dataset_util.bytes_feature(
                    os.path.basename(img_path).encode('utf8')),
            'image/key/sha256':
                dataset_util.bytes_feature(key.encode('utf8')),
            'image/encoded':
                dataset_util.bytes_feature(encoded_jpg),
            'image/format':
                dataset_util.bytes_feature('jpeg'.encode('utf8')),
            'image/object/bbox/xmin':
                dataset_util.float_list_feature(xmin),
            'image/object/bbox/xmax':
                dataset_util.float_list_feature(xmax),
            'image/object/bbox/ymin':
                dataset_util.float_list_feature(ymin),
            'image/object/bbox/ymax':
                dataset_util.float_list_feature(ymax),
            'image/object/class/text':
                dataset_util.bytes_list_feature(classes_text),
            'image/object/class/label':
                dataset_util.int64_list_feature(classes),
            'image/object/difficult':
                dataset_util.int64_list_feature(difficult_obj),
            'image/object/truncated':
                dataset_util.int64_list_feature(truncated),
            'image/object/view':
                dataset_util.bytes_list_feature(poses),
        }))
    return example


def cleaning_dataset(dataset_dir, label_filtering_rule):
    """Run the full dataset-cleaning pipeline over a dataset directory.

    Steps (in order): drop blank/filtered labels, fix filename correlation,
    repair image-size problems, and remove XML files without a matching image.

    :param dataset_dir: root directory of the dataset to clean.
    :param label_filtering_rule: rule used to filter/normalize labels.
    :return: dict with keys "label_list", "annotation_nums", "image_nums"
             and "dataset_items".
    :raise: FileNotFoundError if dataset_dir does not exist.
    """
    cleaner = DatasetCleaner(dataset_dir, label_filtering_rule)
    labels = cleaner.del_label_with_blank()
    cleaner.correlation_filename()
    cleaner.process_img_size_problem()
    annotation_count, image_count, items = cleaner.process_xml_without_picture()

    return {
        "label_list": labels,
        "annotation_nums": annotation_count,
        "image_nums": image_count,
        "dataset_items": items,
    }


def cleaning_xml(xml_path, label_filtering_rule, old_label_list) -> (bool, list):
    """Clean a single XML annotation file and merge its labels.

    :param xml_path: path of the XML annotation file to clean.
    :param label_filtering_rule: rule used to filter/normalize labels.
    :param old_label_list: labels already collected from earlier files.
    :return: (valid, merged_labels) where ``valid`` is False when no objects
             remain in the file after cleaning.
    :raise: FileNotFoundError if xml_path does not exist.
    """
    found_labels, remaining_objects = DataItemCleaner.del_label_with_blank(
        xml_path, label_filtering_rule)
    merged = set(old_label_list)
    merged.update(found_labels)
    return remaining_objects != 0, list(merged)


def create_label_map(label_list: list, output_file_path):
    """Write a TensorFlow object-detection label map (pbtxt) file.

    Labels are assigned 1-based ids in list order (id 0 is reserved for the
    background class by the object-detection API).

    :param label_list: class names, one entry per label.
    :param output_file_path: destination .pbtxt file path.
    :return: number of label items written (always ``len(label_list)``).
    """
    # Explicit utf-8 so non-ASCII class names are written correctly regardless
    # of the platform's default locale encoding.
    with open(output_file_path, 'w', encoding='utf-8') as f:
        for idx, label in enumerate(label_list, start=1):
            f.write("item {\n")
            f.write(f"  id: {idx}\n")
            f.write(f"  name: '{label}'\n")
            f.write("}\n\n")
    # The hand-rolled counter was redundant: one item is written per label.
    return len(label_list)


def create_tfrecords(dataset_dir, label_map_path, output_file_path):
    """Serialize every image/XML annotation pair in a directory to a TFRecord.

    For each image file (jpg/jpeg/png, any case) the XML annotation with the
    same stem is converted via ``_dict_to_tf_example``. Items whose annotation
    is missing or malformed are skipped (best effort).

    :param dataset_dir: directory containing image and .xml annotation files.
    :param label_map_path: path to the label-map pbtxt file.
    :param output_file_path: destination TFRecord file path.
    :return: number of examples written.
    :raise: FileNotFoundError if dataset_dir or label_map_path does not exist.
    """
    label_map_dict = label_map_util.get_label_map_dict(label_map_path)
    count = 0
    writer = tf.python_io.TFRecordWriter(output_file_path)
    try:
        for data_path in os.listdir(dataset_dir):
            # splitext is correct for filenames containing extra dots, unlike
            # the previous split('.')[0] ("a.b.jpg" -> "a.b", not "a").
            stem, ext = os.path.splitext(data_path)
            # Case-insensitive so ".JPG"/".PNG" images are not silently dropped.
            if ext.lower() not in ('.jpg', '.jpeg', '.png'):
                continue
            xml_path = os.path.join(dataset_dir, stem + '.xml')
            img_path = os.path.join(dataset_dir, data_path)
            try:
                tf_example = _dict_to_tf_example(xml_path, img_path, label_map_dict)
            except (IOError, ET.ParseError):
                # Missing or unparsable annotation: skip this item, keep going
                # (consistent with the existing IOError-skip behaviour).
                continue
            writer.write(tf_example.SerializeToString())
            count += 1
    finally:
        # Close the writer even if an unexpected error escapes the loop, so a
        # partially-written TFRecord is at least flushed and not leaked.
        writer.close()
    return count


def analyze_sample_distribution(dataset_dir, class_list):
    """Return the per-class sample distribution of the dataset directory."""
    analyzer = DataAnalyzer()
    return analyzer.analyze_sample_distribution(dataset_dir, class_list)


def analyze_data(dataset_dict, class_list):
    """Run DataAnalyzer over an already-built dataset dict and return its result."""
    analyzer = DataAnalyzer()
    return analyzer.analyze_data(dataset_dict, class_list)


def save(save_path_dict, data_split_dict):
    """Copy each split's (image, xml) file pairs into that split's directory.

    Splits present in ``data_split_dict`` but absent from ``save_path_dict``
    are ignored, matching the original key-matching behaviour.

    :param save_path_dict: mapping split-name -> destination directory.
    :param data_split_dict: mapping split-name -> list of
                            (image_path, xml_path) pairs to copy.
    """
    for split_name, pairs in data_split_dict.items():
        # Direct lookup instead of the previous O(n*m) scan over both dicts.
        dest_dir = save_path_dict.get(split_name)
        if dest_dir is None:
            continue
        for pair in pairs:
            img_path, xml_path = pair[0], pair[1]
            # os.path handles the separator portably; the original hard-coded
            # '\\' and only worked with Windows-style paths.
            shutil.copy(img_path,
                        os.path.join(dest_dir, os.path.basename(img_path)))
            shutil.copy(xml_path,
                        os.path.join(dest_dir, os.path.basename(xml_path)))


def data_split(data_dir, class_list, split_ratio_dict, random_str):
    """Split the dataset with DataSpliter and return its split result."""
    splitter = DataSpliter(data_dir, class_list, split_ratio_dict, random_str)
    return splitter.data_split()
