# -*- coding: utf-8 -*-
"""
YOLOv3的resized方式是embed，即使用黑块填充较短的一边（w or h）,并非v2中的平铺拉伸，
目标框的宽高比并未因为resized操作发生变化（resize 到网络输入如 416x416），
但因较短的一边进行了填充，其相对于填充后的比例发生变化。

综上：Yolov3中anchor boxes是相对于网络输入的比例，对于填充的一边需要调整，而Yolov2中是相对于输出特征图的比例
"""

import os
import sys
import xml.etree.ElementTree as ET
import argparse
import numpy as np


class Box:
    """Axis-aligned bounding box given by its center (x, y) and size (w, h)."""

    def __init__(self, x, y, w, h):
        """Store the center coordinates and the width/height."""
        self.x, self.y = x, y
        self.w, self.h = w, h


def overlap(center_1, len_1, center_2, len_2):
    """
    Length of the 1-D intersection of two segments on a common axis.

    Each segment is given by its center and total length.  The result is
    negative when the segments do not touch.

    :param center_1: center of segment 1 on the axis
    :param len_1:    length of segment 1
    :param center_2: center of segment 2 on the axis
    :param len_2:    length of segment 2
    :return: signed overlap length
    """
    # Intersection runs from the larger left edge to the smaller right edge.
    left = max(center_1 - len_1 / 2, center_2 - len_2 / 2)
    right = min(center_1 + len_1 / 2, center_2 + len_2 / 2)
    return right - left


def box_intersection(a, b):
    """
    Compute the intersection area of two boxes.

    :param a: Box a
    :param b: Box b
    :return: intersection area (0 when the boxes do not overlap)
    """
    overlap_w = overlap(a.x, a.w, b.x, b.w)
    overlap_h = overlap(a.y, a.h, b.y, b.h)
    if overlap_w <= 0 or overlap_h <= 0:
        return 0

    # BUG FIX: the area was computed as overlap_w * overlap_w, squaring the
    # width overlap instead of multiplying width by height.
    area = overlap_w * overlap_h
    return area


def box_union(a, b):
    """
    Compute the union area of two boxes: the sum of both areas minus
    their intersection.

    :param a: Box a
    :param b: Box b
    :return: union area
    """
    return a.w * a.h + b.w * b.h - box_intersection(a, b)


def box_iou(a, b):
    """
    Intersection over union (IoU) of two boxes.

    :param a: Box a
    :param b: Box b
    :return: iou value
    """
    intersection = box_intersection(a, b)
    union = box_union(a, b)
    return intersection / union


def init_centroids(boxes, n_anchors):
    """
    Seed n_anchors centroids with k-means++ so the clustering result is
    less sensitive to the initial random pick.

    :param boxes: list of Box objects for every ground-truth bounding box
    :param n_anchors: number of centroids to choose
    :return: list of the n_anchors chosen Box centroids
    """
    boxes_num = len(boxes)

    # First centroid: a uniformly random box.
    first_index = np.random.choice(boxes_num, 1)[0]
    centroids = [boxes[first_index]]

    # Remaining centroids: roulette-wheel selection, where each box is
    # weighted by its distance (1 - IoU) to the nearest chosen centroid.
    for _ in range(n_anchors - 1):
        distance_list = []
        for box in boxes:
            nearest = min(1 - box_iou(box, centroid) for centroid in centroids)
            # Cap at 1, matching the original accumulator's starting value.
            distance_list.append(min(nearest, 1))
        sum_distance = sum(distance_list)

        distance_thresh = sum_distance * np.random.random()

        cur_sum = 0
        for i, distance in enumerate(distance_list):
            cur_sum += distance
            if cur_sum > distance_thresh:
                centroids.append(boxes[i])
                print(boxes[i].w, boxes[i].h)
                break

    return centroids


def do_kmeans(n_anchors, boxes, centroids):
    """
    Run one k-means step: assign every box to its nearest centroid, then
    recompute each centroid as the mean w/h of its cluster.

    :param n_anchors: the k of k-means
    :param boxes: list of Box objects for all bounding boxes
    :param centroids: current cluster centers
    :return: (new_centroids, groups, loss) where groups is the list of
             boxes per cluster and loss is the summed distance of every
             box to its nearest centroid
    """
    loss = 0
    groups = [[] for _ in range(n_anchors)]
    new_centroids = [Box(0, 0, 0, 0) for _ in range(n_anchors)]

    # Assignment step: accumulate each box into its nearest cluster.
    for box in boxes:
        best_distance, best_index = 1, 0
        for index, centroid in enumerate(centroids):
            distance = 1 - box_iou(box, centroid)
            if distance < best_distance:
                best_distance, best_index = distance, index
        groups[best_index].append(box)
        loss += best_distance
        new_centroids[best_index].w += box.w
        new_centroids[best_index].h += box.h

    # Update step: average the sums; an empty cluster keeps its old centroid.
    for index in range(n_anchors):
        if groups[index]:
            new_centroids[index].w /= len(groups[index])
            new_centroids[index].h /= len(groups[index])
        else:
            new_centroids[index].w = centroids[index].w
            new_centroids[index].h = centroids[index].h

    return new_centroids, groups, loss


def compute_centroids(train_txt, n_anchors, loss_convergence,
                      resized_w_h, resize_type, iterations_num):
    """
    Cluster all ground-truth boxes of a training set into n_anchors
    centroids with k-means and print the anchors scaled to the network
    input resolution.

    :param train_txt: path of the file listing training image paths
    :param n_anchors: number of anchors (the k of k-means)
    :param loss_convergence: stop once the loss change drops below this value
    :param resized_w_h: network input size as [width, height]
    :param resize_type: "PAVE" (plain stretch) or "EMBED" (letterbox padding)
    :param iterations_num: maximum number of k-means iterations
    :return: final list of centroid Box objects, sorted by area ascending
    """
    boxes = []
    random_init_centroid = False  # False -> use k-means++ seeding

    net_w, net_h = resized_w_h[0], resized_w_h[1]
    # BUG FIX: use a context manager so the list file is closed even if
    # parsing one of the annotation/label files raises.
    with open(train_txt, 'r') as train_file:
        for line in train_file:
            if resize_type == "PAVE":
                # Plain stretch: label fractions already match the net input.
                new_w = net_w
                new_h = net_h
            else:
                # EMBED resize keeps a fixed aspect ratio, so the original
                # image size is needed to re-scale the padded dimension.
                # NOTE(review): split('.')[0] breaks on paths with extra dots
                # (e.g. "./dir/img.jpg") — kept as-is, confirm on the dataset.
                xml_file = line.strip().replace('JPEGImages', 'Annotations').split('.')[0]+'.xml'
                root = ET.parse(xml_file).getroot()
                img_w = int(float(root.find('size').find('width').text))
                img_h = int(float(root.find('size').find('height').text))
                if net_w / img_w < net_h / img_h:
                    # Width is the limiting side; height gets black padding.
                    new_w = net_w
                    new_h = img_h * (net_w / img_w)
                else:
                    new_w = img_w * (net_h / img_h)
                    new_h = net_h

            label_file = line.strip().replace('JPEGImages', 'labels').split('.jpg')[0].replace(".", "_")+'.txt'
            with open(label_file) as f:
                for info in f:
                    label_info = info.strip().split()
                    if len(label_info) > 1:
                        # Only IoU matters for the cluster distance, so every
                        # box center can be fixed at (0, 0).
                        boxes.append(Box(0, 0,
                                         float(label_info[3]) * new_w / net_w,
                                         float(label_info[4]) * new_h / net_h))

    if not random_init_centroid:
        centroids = init_centroids(boxes, n_anchors)
    else:
        centroid_indices = np.random.choice(len(boxes), n_anchors, replace=False)
        centroids = [boxes[i] for i in centroid_indices]

    # Iterate k-means until the loss stops improving or the budget runs out.
    centroids, groups, old_loss = do_kmeans(n_anchors, boxes, centroids)
    iterations = 1
    while True:
        centroids, groups, loss = do_kmeans(n_anchors, boxes, centroids)
        iterations = iterations + 1
        print('============== iter {} =============='.format(iterations))
        print("loss = {}".format(loss))
        if abs(old_loss - loss) < loss_convergence or iterations > iterations_num:
            break
        old_loss = loss

    # Print the anchors sorted by area, scaled back to net-input pixels.
    centroids = sorted(centroids, key=lambda box: (box.w * box.h))
    for centroid in centroids:
        print('{:.2f},{:.2f}, '.format(centroid.w * net_w, centroid.h * net_h), end="")
    # BUG FIX: the docstring promised the centroids but nothing was returned;
    # returning them is backward compatible for callers ignoring the result.
    return centroids


if __name__ == '__main__':
    # Prefer bare positional CLI arguments; fall back to argparse flags
    # (with defaults) when they are missing or malformed.
    try:
        ROOT_DIR = sys.argv[1]
        ANCHORS = int(sys.argv[2])
        LOSS_THRES = float(sys.argv[3])
        ITER_NUM = int(sys.argv[4])
    # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only missing (IndexError) or unparsable (ValueError) args should fall
    # through to argparse.
    except (IndexError, ValueError):
        PARSER = argparse.ArgumentParser(description="arguments")
        PARSER.add_argument("--root_dir", type=str,
                            default="/home/ccj/dataset/face_and_maskface/",
                            help="VOC dataset root directory")
        PARSER.add_argument("--anchors", type=int, default=9, help="Anchor numbers")
        PARSER.add_argument("--loss_threshold", type=float, default=0.00001,
                            help="stop iteration if loss is less than this value")
        PARSER.add_argument("--max_iter", type=int, default=300,
                            help="maximum iteration number")
        ARGS = PARSER.parse_args()
        ROOT_DIR = ARGS.root_dir
        ANCHORS = ARGS.anchors
        LOSS_THRES = ARGS.loss_threshold
        ITER_NUM = ARGS.max_iter
    FILE_LIST = os.path.join(ROOT_DIR, "ImageSets/Main/train_img_path.txt")
    RESIZE_SHAPE = [416, 416]
    # Resize mode: PAVE - plain stretch; EMBED - letterbox with black padding.
    RESIZE_TYPE = "PAVE"
    compute_centroids(FILE_LIST, ANCHORS, LOSS_THRES, RESIZE_SHAPE, RESIZE_TYPE, ITER_NUM)
