# -*- coding: utf-8 -*-
"""
Created on Tue Jun 27 14:00:16 2017
YOLOv3的resized方式是embed，即使用黑块填充较短的一边（w or h）,并非v2中的平铺拉伸，
目标框的宽高比并未因为resized操作发生变化（resize 到网络输入如 416x416），
但因较短的一边进行了填充，其相对于填充后的比例发生变化。
综上：Yolov3中anchor boxes是相对于网络输入的比例，对于填充的一边需要调整，而Yolov2中是相对于输出特征图的比例

@author: zxl
"""

# coding=utf-8
# k-means++ for YOLO anchors
# Computes the anchor sizes via the k-means++ algorithm (originally written
# for YOLOv2; adapted for YOLOv3's letterbox resizing — see the header above).
import os
import numpy as np
#import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET

# 定义Box类，描述bounding box的坐标
class Box():
    """An axis-aligned bounding box given by center coordinates and size.

    Attributes:
        x: center x coordinate.
        y: center y coordinate.
        w: box width.
        h: box height.
    """
    def __init__(self, x, y, w, h):
        self.x, self.y = x, y
        self.w, self.h = w, h


# 计算两个box在某个轴上的重叠部分
# x1是box1的中心在该轴上的坐标
# len1是box1在该轴上的长度
# x2是box2的中心在该轴上的坐标
# len2是box2在该轴上的长度
# 返回值是该轴上重叠的长度
def overlap(x1, len1, x2, len2):
    """Return the overlap length of two segments along one axis.

    Each segment is described by its center coordinate (x1 / x2) and its
    length (len1 / len2).  The result is negative when the two segments
    do not intersect.
    """
    half1, half2 = len1 / 2, len2 / 2
    lo = max(x1 - half1, x2 - half2)
    hi = min(x1 + half1, x2 + half2)
    return hi - lo


# 计算box a 和box b 的交集面积
# a和b都是Box类型实例
# 返回值area是box a 和box b 的交集面积
def box_intersection(a, b):
    """Return the intersection area of boxes `a` and `b`.

    Both arguments are Box instances; the result is 0 when the boxes
    do not overlap on either axis.
    """
    w = overlap(a.x, a.w, b.x, b.w)
    h = overlap(a.y, a.h, b.y, b.h)
    return 0 if (w < 0 or h < 0) else w * h


# 计算 box a 和 box b 的并集面积
# a和b都是Box类型实例
# 返回值u是box a 和box b 的并集面积
def box_union(a, b):
    """Return the union area of boxes `a` and `b` (both Box instances)."""
    inter = box_intersection(a, b)
    return a.w * a.h + b.w * b.h - inter


# 计算 box a 和 box b 的 iou
# a和b都是Box类型实例
# 返回值是box a 和box b 的iou
def box_iou(a, b):
    """Return the IoU (intersection over union) of boxes `a` and `b`.

    Returns 0 when the union area is 0 (both boxes degenerate), instead of
    raising ZeroDivisionError as the previous version did.
    """
    u = box_union(a, b)
    if u == 0:
        return 0
    return box_intersection(a, b) / u


# 使用k-means ++ 初始化 centroids，减少随机初始化的centroids对最终结果的影响
# boxes是所有bounding boxes的Box对象列表
# n_anchors是k-means的k值
# 返回值centroids 是初始化的n_anchors个centroid
def init_centroids(boxes, n_anchors):
    """k-means++ seeding: pick n_anchors initial centroids from boxes.

    The first centroid is chosen uniformly at random; each subsequent one is
    sampled with probability proportional to its (1 - IoU) distance to the
    nearest already-chosen centroid, which spreads the seeds out and reduces
    the sensitivity of the final clustering to initialization.

    Args:
        boxes: list of Box objects (only w/h matter for anchor clustering).
        n_anchors: number of centroids to pick (the k of k-means).

    Returns:
        A list of n_anchors Box objects to use as initial centroids.

    Raises:
        ValueError: if `boxes` is empty (the old code only printed a message
            and then crashed inside np.random.choice).
    """
    if not boxes:
        raise ValueError("init_centroids: boxes list is empty")

    boxes_num = len(boxes)
    centroids = []

    first_index = np.random.choice(boxes_num, 1)[0]
    print('centroid_index:', first_index)
    centroids.append(boxes[first_index])
    print(centroids[0].w, centroids[0].h)

    for _ in range(n_anchors - 1):
        # Distance of every box to its nearest current centroid.
        distance_list = []
        sum_distance = 0
        for box in boxes:
            # distance = 1 - IoU lies in [0, 1], so 1 is a valid upper bound.
            min_distance = 1
            for centroid in centroids:
                distance = 1 - box_iou(box, centroid)
                if distance < min_distance:
                    min_distance = distance
            sum_distance += min_distance
            distance_list.append(min_distance)

        # Roulette-wheel selection: pick the box where the running sum of
        # distances first exceeds a random fraction of the total.
        distance_thresh = sum_distance * np.random.random()
        cur_sum = 0
        chosen = None
        for i in range(boxes_num):
            cur_sum += distance_list[i]
            if cur_sum > distance_thresh:
                chosen = i
                break
        if chosen is None:
            # All distances are 0 (every box coincides with some centroid);
            # the old code silently returned fewer than n_anchors centroids
            # here.  Fall back to a uniformly random box instead.
            chosen = np.random.choice(boxes_num, 1)[0]
        centroids.append(boxes[chosen])
        print(boxes[chosen].w, boxes[chosen].h)

    return centroids


# 进行 k-means 计算新的centroids
# boxes是所有bounding boxes的Box对象列表
# n_anchors是k-means的k值
# centroids是所有簇的中心
# 返回值new_centroids 是计算出的新簇中心
# 返回值groups是n_anchors个簇包含的boxes的列表
# 返回值loss是所有box距离所属的最近的centroid的距离的和
def do_kmeans(n_anchors, boxes, centroids):
    """Run one k-means iteration using IoU distance.

    Assigns every box to the nearest centroid (distance = 1 - IoU) and
    recomputes each centroid as the mean w/h of its assigned group.

    Args:
        n_anchors: number of clusters k.
        boxes: list of Box objects to cluster.
        centroids: current list of k centroid Box objects.

    Returns:
        (new_centroids, groups, loss):
        new_centroids -- recomputed centroids (mean w/h of each group);
        groups -- list of k lists holding the boxes assigned to each cluster;
        loss -- sum over all boxes of the distance to their nearest centroid.
    """
    loss = 0
    groups = [[] for _ in range(n_anchors)]
    new_centroids = [Box(0, 0, 0, 0) for _ in range(n_anchors)]

    for box in boxes:
        # distance = 1 - IoU lies in [0, 1], so 1 is a valid upper bound.
        min_distance = 1
        group_index = 0
        for centroid_index, centroid in enumerate(centroids):
            distance = 1 - box_iou(box, centroid)
            if distance < min_distance:
                min_distance = distance
                group_index = centroid_index
        groups[group_index].append(box)
        loss += min_distance
        new_centroids[group_index].w += box.w
        new_centroids[group_index].h += box.h

    for i in range(n_anchors):
        if groups[i]:
            new_centroids[i].w /= len(groups[i])
            new_centroids[i].h /= len(groups[i])
        else:
            # Empty cluster (possible with duplicate boxes or unlucky seeds):
            # keep the previous centroid instead of dividing by zero, which
            # crashed the old implementation.
            new_centroids[i].w = centroids[i].w
            new_centroids[i].h = centroids[i].h

    return new_centroids, groups, loss


# 计算给定bounding boxes的n_anchors数量的centroids
# label_path是训练集列表文件地址
# n_anchors 是anchors的数量
# loss_convergence是允许的loss的最小变化值
# grid_size * grid_size 是栅格数量
# iterations_num是最大迭代次数
# plus = 1时启用k means ++ 初始化centroids
def compute_centroids(train_txt, n_anchors, loss_convergence, resized_w_h, resize_type, iterations_num, plus):
    """Cluster the training-set boxes and print n_anchors anchor sizes.

    Reads the image list from `train_txt` (VOC-style layout with sibling
    JPEGImages / Annotations / labels directories), collects the box sizes,
    runs k-means with IoU distance and prints the resulting anchors, sorted
    by area, scaled to the network input size.

    Args:
        train_txt: path to a text file listing training image paths.
        n_anchors: number of anchors (the k of k-means).
        loss_convergence: stop when |old_loss - loss| falls below this.
        resized_w_h: network input size [netw, neth], e.g. [416, 416].
        resize_type: 0 = paved (stretch) resize; 1 = embed (letterbox) resize,
            where box w/h are rescaled to stay proportional to the padded
            network input.
        iterations_num: maximum number of k-means iterations.
        plus: truthy to seed with k-means++ (init_centroids), otherwise
            plain random seeding.
    """
    boxes = []
    netw, neth = resized_w_h[0], resized_w_h[1]
    count = 0

    # `with` ensures the list file is closed (the old code leaked the handle).
    with open(train_txt) as train_file:
        for line in train_file:
            if resize_type == 0:
                # Paved resize: normalized box w/h are used as-is.
                new_w = netw
                new_h = neth
            else:
                # Embed (letterbox) resize: read the original image size from
                # the VOC annotation to compute the effective resized size.
                anno_file = line.strip().replace('JPEGImages', 'Annotations').split('.')[0] + '.xml'
                root = ET.parse(anno_file).getroot()
                img_w = int(float(root.find('size').find('width').text))
                img_h = int(float(root.find('size').find('height').text))
                if netw / img_w < neth / img_h:
                    new_w = netw
                    new_h = img_h * (netw / img_w)
                else:
                    new_w = img_w * (neth / img_h)
                    new_h = neth

            label_file = line.strip().replace('JPEGImages', 'labels').split('.')[0] + '.txt'
            # Close each label file promptly (the old code leaked one handle
            # per image).
            with open(label_file) as f:
                for info in f:
                    temp = info.strip().split()
                    if len(temp) > 1:
                        # temp[3], temp[4]: normalized box w/h; rescale so they
                        # are relative to the (possibly padded) network input.
                        boxes.append(Box(0, 0,
                                         float(temp[3]) * new_w / netw,
                                         float(temp[4]) * new_h / neth))
            count += 1
            if count % 1000 == 0:
                print("count [%d]" % count)

    if plus:
        centroids = init_centroids(boxes, n_anchors)
    else:
        centroid_indices = np.random.choice(len(boxes), n_anchors)
        centroids = [boxes[i] for i in centroid_indices]

    # Iterate k-means until the loss stops improving or the iteration cap hits.
    centroids, groups, old_loss = do_kmeans(n_anchors, boxes, centroids)
    iterations = 1
    while True:
        centroids, groups, loss = do_kmeans(n_anchors, boxes, centroids)
        iterations = iterations + 1
        print('============== iter %d==============' % iterations)
        print("loss = %f" % loss)
        if abs(old_loss - loss) < loss_convergence or iterations > iterations_num:
            break
        old_loss = loss
        for centroid in centroids:
            print(centroid.w * netw, centroid.h * neth)

    # Print the final anchors sorted by area as "w,h, w,h, ...".
    centroids = sorted(centroids, key=lambda box: (box.w * box.h))
    for centroid in centroids:
        print('%.3f,%.3f,' % (centroid.w * netw, centroid.h * neth), end=' ')
    print('\n no graph!')

def main():
    """Configure and launch anchor-box clustering for YOLOv3."""
    # YOLOv3 anchor boxes are ratios of the network input size [netw, neth].
    train_list = '/home/dataset/zhangyan/15_yolov3_4_2/train_img_path.txt'
    num_anchors = 9
    convergence_eps = 0.00001
    input_size = [416, 416]
    resize_mode = 0  # 0 = paved (stretch), 1 = embed (letterbox)
    max_iterations = 300
    use_kmeans_pp = 1  # 1 enables k-means++ seeding of the centroids
    compute_centroids(train_list, num_anchors, convergence_eps,
                      input_size, resize_mode, max_iterations, use_kmeans_pp)

if __name__ == '__main__':
    main()
