import numpy as np
from PIL import Image
import os

from config import model_type, scale_num, type_dict, Input_shape, data_path, num_class, MAX_BOX_NUM


def read_classes(classes_path):
    """Read class names from a text file, one name per line.

    :param classes_path: path to the class-name list file.
    :return: list of stripped class-name strings, in file order.
    """
    with open(classes_path) as handle:
        return [name.strip() for name in handle]


def read_anchors(anchors_path):
    """Parse anchor sizes from the first line of a comma-separated file.

    :param anchors_path: path to a file whose first line is "w1,h1, w2,h2, ...".
    :return: float ndarray of shape (num_anchors, 2) holding (w, h) pairs.
    """
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)


class Data_Gen():
    """Serves the .npz training packages stored in ``save_path`` one by one.

    Packages are iterated in sorted file-name order; after the last one the
    cursor wraps back to the first package.
    """

    def __init__(self, save_path):
        """Scan ``save_path`` and remember every regular file as a package.

        :param save_path: directory containing the .npz packages.
        """
        self.save_path = save_path
        # Sort for a deterministic package order; os.listdir order is
        # filesystem-dependent and previously made epochs nondeterministic.
        self.fileList = sorted(
            f for f in os.listdir(save_path)
            if os.path.isfile(os.path.join(save_path, f))
        )
        self.pkg_idx = 0

    def load_pkg(self, state):
        """Load the package at the cursor and advance (wrapping at the end).

        :param state: two-element list used as an out-parameter:
                      state[0] is set True when the last package was just
                      served (end of epoch); state[1] receives the file name.
        :return: (image_data, box_data, image_shape,
                  [y_true0, y_true1, y_true2]) arrays from the .npz file.
        """
        name = self.fileList[self.pkg_idx]
        print("load file: " + name)
        data = np.load(os.path.join(self.save_path, name))
        state[1] = name

        if self.pkg_idx < len(self.fileList) - 1:
            self.pkg_idx += 1
        else:
            # Last package served: wrap around and flag end-of-epoch.
            self.pkg_idx = 0
            state[0] = True

        return data['image_data'], data['box_data'], data['image_shape'], \
               [data['y_true0'], data['y_true1'], data['y_true2']]




def _save_pkg(save_path, pkg_idx, image_data, box_data, image_shape,
              input_shape, anchors, num_classes):
    """Stack one accumulated batch, build the y_true targets and save a .npz."""
    image_shape = np.array(image_shape)  # [num, 2] original (h, w) per image
    image_data = np.array(image_data)    # [num, H, W, 3] uint8, not normalized
    box_data = np.array(box_data)        # [num, max_boxes, 8] int32

    # One target array per scale, e.g. [(m,13,13,3,8+C), (m,26,26,3,8+C), ...]
    y_true = preprocess_true_boxes(box_data, input_shape[0], anchors, num_classes)

    if model_type == 'N':
        np.savez(save_path + '/train_nm_%d.npz' % (pkg_idx), image_data=image_data, box_data=box_data,
                 image_shape=image_shape, y_true0=y_true[0],
                 y_true1=y_true[1], y_true2=y_true[2])
    else:
        # Tiny model: only two scales are produced.
        np.savez(save_path + '/train_tn_%d.npz' % (pkg_idx), image_data=image_data, box_data=box_data,
                 image_shape=image_shape, y_true0=y_true[0],
                 y_true1=y_true[1])


def save_np_data(annotation_path, save_path, input_shape, anchors, num_classes, max_boxes=MAX_BOX_NUM,
                 pkg_capacity=2000, need_shufle=True):
    """
    Process annotated images into fixed-shape training packages on disk.

    :param annotation_path: text file, one image per line:
                            path_to_image.png box1 box2 ... with each box a
                            comma-separated tuple of 7 ints — presumably
                            x_min,y_min,x_max,y_max,seed_x,seed_y,label
                            (label is remapped through type_dict) — TODO confirm
                            against the annotation generator.
    :param save_path: directory that receives train_nm_%d.npz / train_tn_%d.npz.
    :param input_shape: network input (h, w), e.g. (416, 416).
    :param anchors: anchor array forwarded to preprocess_true_boxes.
    :param num_classes: number of object classes.
    :param max_boxes: maximum number of objects kept per image.
    :param pkg_capacity: number of images per saved package.
    :param need_shufle: shuffle the annotation lines before processing.
    """
    image_data = []
    box_data = []
    image_shape = []
    pkg_idx = 1
    pkg_start = 1  # 1-based index of the first image in the current package
    img_idx = 0
    with open(annotation_path) as f:
        lines = f.readlines()
        if need_shufle:
            np.random.shuffle(lines)

        for line in lines:
            # Skip blank lines and commented-out annotations.
            if line == '\n' or line.startswith('#'):
                continue

            parts = line.split('.png')
            filename = parts[0] + '.png'
            try:
                image = Image.open(filename)
            except FileNotFoundError:
                print("not found file " + filename)
                continue
            img_idx += 1

            # Letterbox: resize keeping aspect ratio, grey-pad to input_shape.
            boxed_image, shape_image = letterbox_image(image, tuple(reversed(input_shape)))
            image_data.append(np.array(boxed_image, dtype=np.uint8))  # pixels [0:255]
            image_shape.append(np.array(shape_image))

            # Letterbox geometry — invariant per image, so computed once here
            # instead of once per box as before.
            image_size = np.array(image.size)         # original (w, h)
            input_size = np.array(input_shape[::-1])  # network (w, h)
            new_size = (image_size * np.min(input_size / image_size)).astype(np.int32)
            offset = (input_size - new_size) / 2      # grey-padding offset

            # Per-box layout: [x_min, y_min, x_max, y_max, seed_x, seed_y,
            #                  class, order]; unused rows stay all-zero.
            boxes = np.zeros((max_boxes, 8), dtype=np.int32)
            for i, box in enumerate(parts[1].split()):
                if i >= max_boxes:
                    break
                boxes[i, :7] = np.array(list(map(int, box.split(','))))
                # Map top-left, bottom-right and seed point into absolute
                # coordinates of the letterboxed input image.
                boxes[i, 0:2] = (boxes[i, 0:2] * new_size / image_size + offset).astype(np.int32)
                boxes[i, 2:4] = (boxes[i, 2:4] * new_size / image_size + offset).astype(np.int32)
                boxes[i, 4:6] = (boxes[i, 4:6] * new_size / image_size + offset).astype(np.int32)
                try:
                    # Remap the raw label id to the model's class index.
                    boxes[i, 6] = type_dict[int(boxes[i, 6])]
                except KeyError:
                    print("key error in case:  " + filename)
                boxes[i, 7] = i + 1  # 1-based input order of the box

            box_data.append(boxes)

            if img_idx % pkg_capacity == 0:
                _save_pkg(save_path, pkg_idx, image_data, box_data, image_shape,
                          input_shape, anchors, num_classes)
                print('Saving %d -- %d imgs into %dth pkg ' %
                      (pkg_start, img_idx, pkg_idx) + save_path)
                image_data = []
                box_data = []
                image_shape = []
                pkg_idx += 1
                pkg_start = img_idx + 1

    # Flush the final partial package. The original compared img_idx against a
    # pre-counted line total that was not adjusted for missing image files, so
    # the last partial package could be silently dropped.
    if image_data:
        _save_pkg(save_path, pkg_idx, image_data, box_data, image_shape,
                  input_shape, anchors, num_classes)
        print('Saving %d -- %d imgs into %dth pkg ' %
              (pkg_start, img_idx, pkg_idx) + save_path)


def letterbox_image(image, size):
    """Resize an image to fit inside *size* keeping aspect ratio, grey-padded.

    :param image: PIL image to resize.
    :param size: target (w, h) of the padded output, e.g. (416, 416).
    :return: (boxed_image, image_shape) — the padded PIL image and the
             original size as np.array([h, w]).
    """
    image_w, image_h = image.size
    target_w, target_h = size
    # Single scale factor so that the resized image fits inside the target.
    scale = min(target_w / image_w, target_h / image_h)
    new_w = int(image_w * scale)
    new_h = int(image_h * scale)

    resized = image.resize((new_w, new_h), Image.BICUBIC)
    # Grey canvas with the resized image pasted centred on it.
    canvas = Image.new('RGB', size, (128, 128, 128))
    canvas.paste(resized, ((target_w - new_w) // 2, (target_h - new_h) // 2))
    return canvas, np.array([image_h, image_w])


# Partie l'entrainement
def preprocess_true_boxes(true_boxes, Input_shape, anchors, num_classes):
    """
    Preprocess true boxes to training input format
    :param true_boxes: array, shape=(N, 16, 6) 在 416 中的绝对坐标.
    :param input_shape: array-like, hw, multiples of 32, shape = (2,)
    :param anchors: array, shape=(9, 2), wh
    :return: y_true: list(3 array), shape like yolo_outputs, xywh are reletive value 3 array [N,, 13, 13, 3, 85]
    """
    # larger --> smaller
    if model_type == 'N':
        anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    else:
        anchor_mask = [[3, 4, 5], [0, 1, 2]]
    
    true_boxes = np.array(true_boxes, dtype=np.float32)
    input_shape = np.array([Input_shape, Input_shape], dtype=np.int32)  # 416,416
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2  # [m, 20, 2]  (x, y)center point of BB
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]  # w = x_max - x_min  [m, T, 2]
    # h = y_max - y_min
    # [N, 20, 2] 中心点的相对于全图的位置 [0,1]
    true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]  # hw -> wh   翻转没意义
    # [N, 20, 2] box wh 相对于 全图的大小
    true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]  # hw -> wh
    # [N, 20, 2] 种子点的相对于全图的位置 [0,1]
    true_boxes[..., 4:6] = true_boxes[..., 4:6] / input_shape[::-1]  # hw -> wh
    
    N = true_boxes.shape[0]
    # [(13,13),(26,26),(52,52)] num of grid in this scale. 下面的除数是感受野(2**downsample 的次数)， (13 13)是 top feature map
    grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(3)]
    # grid_shapes = [np.array(input_shape // scale, dtype=np.int) for scale in [32, 16, 8]]  # [2,] ---> [3, 2]
    y_true = [np.zeros((N, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 8 + int(num_classes)),
                       dtype=np.float32) for l in
              range(scale_num)]  # [(m, 13, 13, 3, 25),(m, 26, 26, 3, 25),(m, 52, 52, 3, 25)]
    
    # Expand dim to apply broadcasting.
    anchors = np.expand_dims(anchors, 0)  # [1, 9, 2]  e.g. 10 13
    anchor_maxes = anchors / 2.  # w/2, h/2  [1, 9, 2]  e.g. 5  6.5
    anchor_mins = -anchor_maxes  # -w/2, -h/2  [1, 9, 2]  e.g. -5 -6.5
    # [N, 20]
    valid_mask = boxes_wh[..., 0] > 0  # w>0 True, w=0 False  [m, 20]
    
    for b in (range(N)):  # for all of N image
        # Discard zero rows.
        # [actual_box_num, 2]
        wh = boxes_wh[b, valid_mask[b]]  # image 0: wh [[187, 357]]
        # Expand dim to apply broadcasting.
        if len(wh) == 0:
            continue
        # [actual_box_num, 1,2]
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes
        # anchor [1 9 2]
        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]  # [actual_box_num, 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]  # [1, anchor_num]
        iou = intersect_area / (box_area + anchor_area - intersect_area)  # [actual_box_num, anchor_num]
        
        # Find best anchor index from 0 to 9 for each true box
        best_anchor = np.argmax(iou, axis=-1)  # [actual_box_num]
        
        for t, n in enumerate(best_anchor):
            # 图中第t个box, 采用第 n 个anchor
            for l in range(scale_num):  # 1 in 3 scale
                if n in anchor_mask[l]:  # choose the corresponding mask: best_anchor in [6, 7, 8]or[3, 4, 5]or[0, 1, 2]
                    # 第b幅图的第t个box的中心点在第l scale(总共有grid_shape个格子) 的第 (i,j)个格子
                    i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype(
                        np.int32)  # ex: 3+1.2=4.2--> vao ô co y=4
                    j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype(
                        np.int32)  # ex: 3+0.5=3.5--> vao o co x=3 --> o (x,y)=(3,4)  # TODO
                    if grid_shapes[l][1] == 13 and (i >= 13 or j >= 13):
                        print(i, b)
                        break
                    # 在第l 尺度下属于第k种anchor（一共有3种）
                    k = anchor_mask[l].index(n)
                    # print("  scale l:", l, "best anchor k:", k, anchors[:, l + n])
                    c = true_boxes[b, t, 6].astype(np.int32)  # idx classes in voc classes
                    
                    # 中心点相对全图位置 和 wh 相对于全图416的比例
                    try:
                        y_true[l][b, j, i, k, 0:6] = true_boxes[b, t,
                                                     0:6]  # l: scale; b; idx image; grid(i:y , j:x); k: best anchor; 0:4: (x,y,w,h)/input_shape
                    except:
                        print(" Index Error!!!! ")
                        print(j, i, k, t)
                    y_true[l][b, j, i, k, 6] = 1  # score = 1  其余非最大IoU的anchor都是0
                    y_true[l][b, j, i, k, 7] = true_boxes[b, t, 7]  # 输入的顺序
                    y_true[l][b, j, i, k, 8 + c] = 1  # classes = 1, the others =0
                    
                    break  # if chon dung mask (scale) ---> exit (for l in range(3))
    
    # [(m, 13, 13, 3, 6),(m, 26, 26, 3, 6),(m, 52, 52, 3, 6)]
    return y_true


if __name__ == '__main__':
    # Script driver: convert one annotation file into .npz packages, then
    # iterate the packages once as a smoke test of Data_Gen.
    # NOTE(review): this shadows the data_path imported from config with a
    # hard-coded local path — confirm which one is intended.
    data_path = 'F:/ProjectData/yolo3/newData/AIData3/1aitest'
    input_shape = (Input_shape, Input_shape)  # multiple of 32
    anchors_paths = data_path + '/normal_anchor.txt'
    anchors = read_anchors(anchors_paths)
    
    
    annotation_path = os.path.join(data_path, 'train.txt')
    save_path = os.path.join(data_path, 'valid_np_pkg')
    # Create the output directory on first run (mkdir, not makedirs: the
    # parent data_path must already exist).
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    
    # Build the packages without shuffling so output order is reproducible.
    save_np_data(annotation_path,
                 save_path,
                 input_shape, anchors,
                 num_classes=num_class,
                 pkg_capacity=1100,
                 max_boxes=MAX_BOX_NUM,
                 need_shufle=False)

    data_gen = Data_Gen(save_path)
    # is_end acts as an out-parameter: [epoch_finished_flag, last_file_name].
    is_end = [False,'']

    # One full pass over all saved packages; load_pkg sets is_end[0] True
    # after serving the last package.
    while not is_end[0]:
        x_train, box_data_train, image_shape_train, y_train \
            = data_gen.load_pkg(is_end)
        number_image_train = np.shape(x_train)[0]
        print("number_image_train", number_image_train)

