# datasets.py
# ! Keras-based utilities for processing YOLO data and building dataloaders
from __future__ import absolute_import

import os
import cv2
import time
import random
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import xml.etree.ElementTree as ET

from datasets_utils import mosaic as Mosaic, letterbox_v2 as Letterbox, makeup as Makeup

from image_augment import transform_on_yolo, mixup_on_yolo

class YoloDataLoader(keras.utils.Sequence):
    def __init__(self,batch_size,imagepaths,imagefunc,labelpaths=None,labelfunc=None,together=False,
                 letterbox=True,mosaic=True,makeup=True,shuffle=False):
        """Keras Sequence yielding batches of YOLO data.

        Args:
            batch_size: number of samples per batch.
            imagepaths: list of image paths (or combined image/label items when
                ``together`` is True).
            imagefunc: callable applied to each batch slice of ``imagepaths``.
            labelpaths: optional list of label paths, same length as imagepaths.
            labelfunc: optional callable applied to each batch slice of labelpaths.
            together: short for "image and label together". If True, labelpaths
                and labelfunc are ignored; all data comes from imagepaths and is
                processed by imagefunc. If False, images and labels are fetched
                from imagepaths and labelpaths separately.
            letterbox, mosaic, makeup: augmentation switches; only effective on
                the ``together`` path (see __getitem__).
            shuffle: if True, shuffle once after init and after every epoch.
        """
        if not together and labelpaths is not None: assert len(imagepaths) == len(labelpaths)

        self.batch_size = batch_size
        self.imagepaths = imagepaths
        self.imagefunc  = imagefunc
        self.labelpaths = labelpaths
        self.labelfunc  = labelfunc
        self.together   = together
        self.letterbox  = letterbox
        self.mosaic     = mosaic
        self.makeup     = makeup
        self.shuffle    = shuffle
        self.random_state = int(time.time()) % 1024  # seed fixed at construction time

        if self.shuffle: self.random() # ! shuffle once after initialization

    def random(self):
        """Shuffle image (and label) path lists, keeping them aligned."""
        # ! index list
        index_list = list(range(len(self.imagepaths)))
        # ! shuffle. NOTE(review): this reseeds the *global* random module with
        # the same seed on every call; order still changes per epoch because the
        # same permutation is re-applied to the already-permuted lists.
        random.seed(self.random_state)
        random.shuffle(index_list)
        # ! reorder both lists by the shuffled indices
        self.imagepaths = [self.imagepaths[index] for index in index_list]
        if self.labelpaths is not None:
            self.labelpaths = [self.labelpaths[index] for index in index_list]

    def __len__(self):
        return int(np.ceil(len(self.imagepaths) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_imagepaths = self.imagepaths[idx * self.batch_size: (idx+1) * self.batch_size]
        if self.together:
            #! In this design image and label are inseparable, so mosaic,
            #! letterbox and makeup can only take effect on the `together` path.
            # BUGFIX: letterbox_auto was randomly drawn but a hard-coded False
            # was passed instead; forward the drawn value.
            letterbox_auto = random.choice([True,False])
            return self.imagefunc(batch_imagepaths,
                                  mosaic=self.mosaic,
                                  letterbox=self.letterbox,
                                  letterbox_auto=letterbox_auto,
                                  makeup=self.makeup)
        else:
            if self.labelpaths:
                batch_labelpaths = self.labelpaths[idx * self.batch_size: (idx+1) * self.batch_size]
                if self.labelfunc:
                    return self.imagefunc(batch_imagepaths), self.labelfunc(batch_labelpaths)
                else:
                    return self.imagefunc(batch_imagepaths), batch_labelpaths
            else:
                return self.imagefunc(batch_imagepaths)

    def on_epoch_end(self):
        if self.shuffle: self.random() # ! reshuffle after every epoch


def preprocess_batch_images(batch_images, prefunc=None):
    """Stack a list of images into one array normalized to [0, 1].

    When *prefunc* is given, each image is passed through it before the
    255-division; otherwise the raw images are stacked directly.
    """
    if prefunc is None:
        return np.stack(batch_images) / 255.0
    return np.stack([prefunc(img) / 255.0 for img in batch_images])

def preprocess_single_labels(labels,imgsz,strides,grid_anchors,anchor_t=4,gt=False):
    """Build per-level YOLO training targets for one image.

    Args:
        labels: (nt,5) array of (class, xc, yc, w, h), coords normalized to [0,1].
        imgsz: network input size as (width, height).
        strides: per-level downsample factors, len(strides) == nl.
        grid_anchors: (nl, na, 2) anchors in grid units.
        anchor_t: anchor/target size-ratio threshold used to filter pairs.
        gt: if True, shift class ids by +1 (class 0 reserved, see caller).

    Returns:
        tcls, tbox, indices, anch — per-level lists of target classes, boxes
        (xy offset inside the cell + wh in grid units), [gj, gi, anchor-id]
        index triples, and the matched anchors.
    """
    # ! grid_anchors shape: (nl,na,2)
    nl, na, _ = grid_anchors.shape
    assert nl == len(strides)

    # BUGFIX: operate on a copy so the caller's array is never mutated
    # (previously `labels[:,0] += 1` wrote back into the caller's data).
    labels = np.array(labels, copy=True)
    if len(labels.shape) < 2:
        labels = labels[None]
    if gt: labels[:,0] += 1

    nt = labels.shape[0]
    targets = labels.copy() # (nt,5) (class,xc,yc,w,h)
    gain = np.ones(shape=(6,)) #! matches the 6-column target layout below
    ai = np.tile(np.arange(na).reshape(na,1,1),[1,nt,1]) #! (na) -> (na,1,1) -> (na,nt,1)
    targets = np.concatenate([np.tile(targets[None],[na,1,1]),ai],axis=2)
    g = 0.5  # cell-center bias used to select neighbouring cells
    off = np.array([[0,0],
                    [1,0],[0,1],[-1,0],[0,-1]]) * g

    tcls, tbox, indices, anch = [], [], [], []
    for l in range(nl):
        grid_anchor = grid_anchors[l]
        nx, ny = imgsz[0] // strides[l], imgsz[1] // strides[l]
        gain[1:5] = [nx,ny,nx,ny]  # scale normalized xywh to this grid

        t = targets * gain
        if nt:
            r = t[:,:,3:5] / grid_anchor[:,None] # (na,nt,2) / (na,1,2)
            j = np.maximum(r,1/r).max(2) < anchor_t # (na,nt) bool mask
            t = t[j] # filter; rows remain grouped by anchor # (?,6)

            gxy = t[:,1:3]
            gxi = gain[2:4] - gxy # inverse coordinates
            i1, i2 = ((gxy % 1 < g) & (gxy > 1)).T # bool masks
            i3, i4 = ((gxi % 1 < g) & (gxi > 1)).T # bool masks
            j = np.stack([np.ones_like(i1),i1,i2,i3,i4])
            t = np.tile(t[None],[5,1,1])
            t = t[j]

            offsets = (np.zeros_like(gxy)[None] + off[:,None])[j]
        else:
            t = targets[0]
            offsets = 0

        c = t[:,0]
        gxy = t[:,1:3]
        gwh = t[:,3:5]
        gij = (gxy - offsets).astype(np.int64)
        gi, gj = gij.T # gi is the x coordinate, gj the y coordinate
        a = t[:,5].astype(np.int64)
        indices.append([np.clip(gj,0,gain[4]-1),np.clip(gi,0,gain[3]-1),a])
        tbox.append(np.concatenate([gxy-gij,gwh],axis=-1)) # xy offsets only; does not affect IoU
        anch.append(grid_anchor[a])
        tcls.append(c)

    return tcls, tbox, indices, anch

def preprocess_batch_labels(batch_labels,nc=80,imgsz=(640,640),strides=None,grid_anchors=None,anchor_t=4,gt=False):
    """Convert a batch of YOLO labels into dense per-level target tensors.

    Args:
        batch_labels: list of (nt,5) arrays (class, xc, yc, w, h), normalized.
        nc: number of classes (one extra slot is added when gt=True).
        imgsz: network input size as (width, height).
        strides: per-level strides; defaults to [8, 16, 32].
        grid_anchors: nl rows of na*2 anchor values in grid units.
        anchor_t: threshold forwarded to preprocess_single_labels.
        gt: if True, class 0 is reserved (class ids shift by +1).

    Returns:
        List of nl arrays shaped (b, ny*nx, na, 5+nc). Channel 4 holds the
        match flag: 2 for the best anchor in a cell, 1 for other matches.
    """
    #! imgsz: (width, height)
    if strides is None: strides = [8,16,32]
    if grid_anchors is None:
        grid_anchors = [
            [1.250,1.6250, 2.000,3.7500,  4.12500, 2.8750],
            [1.875,3.8125, 3.875,2.8125,  3.68750, 7.4375],
            [3.625,2.8125, 4.875,6.1875, 11.65625,10.1875],
        ]
    if gt: nc += 1

    nl = len(grid_anchors)
    na = len(grid_anchors[0])//2
    assert nl == len(strides)
    grid_anchors = np.array(grid_anchors,dtype=np.float64).reshape([nl,na,2]) #! (nl,na,2)
    b = len(batch_labels) #! batch size
    batch_data = [[] for _ in range(nl)] #! one accumulator per detection level

    for dx, labels in enumerate(batch_labels): #! iterate over each image's labels
        tcls, tbox, indices, anch = preprocess_single_labels(labels,imgsz,strides,grid_anchors,anchor_t,gt)
        for ldx in range(nl):
            nx, ny = imgsz[0] // strides[ldx], imgsz[1] // strides[ldx]
            targets = np.zeros(shape=(ny,nx,na,5+nc))
            ltcls = tcls[ldx]
            ltbox = tbox[ldx]
            lty, ltx, lta = indices[ldx]
            tanch = anch[ldx]
            for c, box, y, x, a, anchor in zip(ltcls, ltbox, lty, ltx, lta, tanch):
                c, y, x, a = int(c), int(y), int(x), int(a)
                targets[y,x,a,0:4] = box
                r = box[2:] / anchor
                r = np.maximum(r,1/r).max()  # size-ratio score; lower = better match
                targets[y,x,a,4] = r
                if gt:
                    targets[y,x,a,6+c] = 1
                else:
                    targets[y,x,a,5+c] = 1

            #! Mark the best anchor match per cell (2) vs. other matches (1).
            # BUGFIX 1: loops iterated i over nx / j over ny but indexed
            # targets[i,j] into a (ny,nx,...) array — transposed, and an
            # IndexError for non-square grids. Iterate rows (ny) then cols (nx).
            # BUGFIX 2: the emptiness test `not r_indices.any()` was False-
            # positive when the only matching anchor index was 0; use .size.
            for j in range(ny):
                for i in range(nx):
                    r_values = targets[j,i,:,4]
                    r_indices, = np.where(r_values>0)
                    if r_indices.size == 0: continue #! no positive match in this cell
                    r_min_value = r_values[r_indices].min() #! best match
                    for adx in range(na):
                        if targets[j,i,adx,4] == 0: continue
                        elif targets[j,i,adx,4] == r_min_value:
                            targets[j,i,adx,4] = 2
                        else:
                            targets[j,i,adx,4] = 1

            batch_data[ldx].append(targets)

    #! final packing: flatten the spatial dims to (b, ny*nx, na, 5+nc)
    batch_data = [np.array(data).reshape([b,-1,na,5+nc]) for data in batch_data]

    return batch_data
 
def preprocess_batch(batch,image_prefunc,label_prefunc,label_readfunc=None,
        mosaic=True,letterbox=True,letterbox_auto=True,makeup=True,
        input_shape=(640,640)):
    """Load, augment and preprocess one batch of (image_path, label_path) pairs.

    Labels are read with *label_readfunc* (defaults to parse_txt_annotation);
    mosaic, letterbox and makeup augmentations are applied in that fixed order
    when enabled, then the image/label preprocessing callables are applied.
    """
    if label_readfunc is None:
        label_readfunc = parse_txt_annotation

    image_paths, label_paths = zip(*batch)
    images = [cv2.imread(path) for path in image_paths]
    labels = [label_readfunc(path) for path in label_paths]

    if mosaic:
        images, labels = Mosaic(images, labels)
    if letterbox:
        images, labels = Letterbox(images, labels,
            input_shape=input_shape, auto=letterbox_auto)
    if makeup:
        images, labels = Makeup(images, labels)

    return image_prefunc(images), label_prefunc(labels)

def parse_txt_annotation(annotation_path):
    """Read a space-separated YOLO label file as a rank-2 float array."""
    annotation = np.loadtxt(annotation_path,delimiter=' ',dtype='float')
    # promote a single-row (1-D) result to shape (1, k)
    return np.atleast_2d(annotation)

def parse_xml_annotation(annotation_path,class_dict):
    """Parse a Pascal-VOC XML annotation into normalized YOLO rows.

    Args:
        annotation_path: path to the VOC-style XML file.
        class_dict: mapping from class name to integer class id.

    Returns:
        (n,5) float array of (class_id, xc, yc, w, h), with all coordinates
        normalized by the image width/height (same layout as the txt labels
        produced by parse_txt_annotation).
    """
    objects = []

    tree = ET.parse(annotation_path)
    root = tree.getroot()

    width  = float(root.find('size').find('width').text)
    height = float(root.find('size').find('height').text)

    for obj in root.findall('object'):
        name = obj.find('name').text
        bndbox = obj.find('bndbox')
        xmin = float(bndbox.find('xmin').text)
        ymin = float(bndbox.find('ymin').text)
        xmax = float(bndbox.find('xmax').text)
        ymax = float(bndbox.find('ymax').text)
        xc = (xmax + xmin) / 2 / width
        yc = (ymax + ymin) / 2 / height
        # BUGFIX: w/h were additionally halved (extra /2); YOLO w/h are the
        # full box extent divided by the image size.
        w  = (xmax - xmin) / width
        h  = (ymax - ymin) / height

        objects.append([class_dict[name],xc,yc,w,h])

    return np.array(objects)


def _make_grid(imgsz,na=3,strides=[8,16,32],dtype=np.float32):
    """
        imgsz(w,h)
    """
    grids = []
    for i in range(len(strides)):
        nx, ny = imgsz[0]//strides[i], imgsz[1]//strides[i]
        xv, yv = np.meshgrid(np.arange(nx,dtype=dtype),np.arange(ny,dtype=dtype))
        #! 列坐标, 行坐标 = meshgrid(列数,行数) -> 生成维度信息 (行数, 列数) 即 (ny,nx,2)
        grid = np.stack([xv,yv],axis=-1)
        #! stack() 哪个张量在前, 则在stack时位置靠前
        #! (ny,nx,2) 最后一维元素信息如下: (x坐标,y坐标), 最后一维元素之间的变化是: x坐标先变, y坐标后变
        #! 从图像角度来看坐标, 即: 从左向右, 从上到下, 一次扫过图像
        grid = np.tile(np.reshape(grid,[-1,1,2]),[1,na,1])
        #! reshape之后, xy坐标变化顺序不变 维度信息为 (ny*nx,na,2)

        # ! 必须将grid和conv的结果按照相同的方式处理
        grids.append(grid) # 会自动广播 (nx*ny,2) -> (nx*ny,3,2)
    return grids


class YOLODataset(keras.utils.Sequence):
    """Keras Sequence that yields YOLO training batches.

    Incoming and outgoing boxes are (cx, cy, w, h) ratio coordinates.
    Each returned label row is (image_id, class, cx, cy, w, h).
    """

    def __init__(self, batch_size, data_folder, transform, sub_data_folder=None, output_shape=(640,640), shuffle=False):
        """
        Args:
            batch_size: samples per batch.
            data_folder: root folder containing images/ and labels/ subfolders.
            transform: 'TRAIN' or 'TEST' augmentation split.
            sub_data_folder: optional subfolder under images/ and labels/.
            output_shape: output image size, (w, h).
            shuffle: reshuffle the sample order after every epoch.
        """
        self.transform = transform
        assert transform in {'TRAIN', 'TEST'}
        self.data_folder = data_folder

        images_dir = os.path.join(data_folder, 'images/%s'%sub_data_folder) if sub_data_folder is not None else os.path.join(data_folder, 'images')
        labels_dir = os.path.join(data_folder, 'labels/%s'%sub_data_folder) if sub_data_folder is not None else os.path.join(data_folder, 'labels')
        imagenames = [f.split(".")[0] for f in os.listdir(images_dir)]
        labelnames = [f.split(".")[0] for f in os.listdir(labels_dir)]
        # keep only samples that have both an image and a label file
        names = list(set(imagenames) & set(labelnames))
        self.raw_images_paths = [os.path.join(images_dir, "%s.jpg"%name) for name in names]
        self.raw_labels_paths = [os.path.join(labels_dir, "%s.txt"%name) for name in names]

        assert len(self.raw_images_paths) == len(self.raw_labels_paths)

        self.indices = list(range(len(self.raw_images_paths)))
        self.images_paths = [self.raw_images_paths[i] for i in self.indices]
        self.labels_paths = [self.raw_labels_paths[i] for i in self.indices]

        self.batch_size = batch_size
        self.output_shape = output_shape

        self.shuffle = shuffle

        if self.shuffle: self.random()

    def __len__(self):
        return int(np.ceil( len(self.images_paths) / float(self.batch_size) ))

    def __getitem__(self, i):
        batch_images_paths = self.images_paths[i * self.batch_size: (i+1) * self.batch_size]
        batch_labels_paths = self.labels_paths[i * self.batch_size: (i+1) * self.batch_size]

        batch_images = list()
        batch_labels = list()

        BATCH_SIZE = 16  # mixup group size

        if self.batch_size >= BATCH_SIZE: #! mixup is only applied for batch_size >= 16
            image_list  = list()
            boxes_list  = list()
            labels_list = list()
            difficulties_list = list()

        for j, (image_path, label_path) in enumerate(zip(batch_images_paths, batch_labels_paths)):

            image = cv2.imread(image_path)
            label = np.loadtxt(label_path)
            if len(label.shape) < 2: label = label[None]
            boxes = label[:, 1:]
            class_labels = label[:, 0]
            difficulties = np.zeros_like(class_labels)

            if self.batch_size >= BATCH_SIZE:
                # keep the raw (pre-transform) samples for mixup below
                image_list.append(image)
                boxes_list.append(boxes)
                labels_list.append(class_labels)
                difficulties_list.append(difficulties)

            image, boxes, class_labels, difficulties = \
                transform_on_yolo(image, boxes, class_labels, difficulties, split=self.transform, new_wh=self.output_shape)

            if len(class_labels.shape) < 2: class_labels = class_labels[:, None]

            image_id = j * np.ones(shape=(boxes.shape[0], 1))

            label = np.concatenate([image_id, class_labels, boxes], axis=-1)

            batch_images.append(image)
            batch_labels.append(label)

        # number of complete mixup groups in this batch
        num = min(self.batch_size // BATCH_SIZE, len(batch_images) // BATCH_SIZE)

        # NOTE: loop variable renamed from `i` (it shadowed the batch index).
        for k in range(num):
            # BUGFIX: original code was `random.randint(a, min(b), c)` — a
            # misplaced parenthesis passing three args to randint (TypeError).
            # The upper bound is the min of the two candidates.
            sub_indice = random.randint(
                k * BATCH_SIZE,
                min((k + 1) * BATCH_SIZE - 1, k * BATCH_SIZE + len(batch_images) - 1))
            group = slice(k * BATCH_SIZE, (k + 1) * BATCH_SIZE)
            if random.random() < 0.5: #! scale-and-merge
                image, boxes, class_labels, difficulties = \
                    mixup_on_yolo(image_list[group],
                                  boxes_list[group],
                                  labels_list[group],
                                  difficulties_list[group],
                                  self.output_shape)
            else: #! crop-and-merge
                image, boxes, class_labels, difficulties = \
                    mixup_on_yolo(image_list[group],
                                  boxes_list[group],
                                  labels_list[group],
                                  difficulties_list[group],
                                  self.output_shape,
                                  crop=True)

            # replace one randomly chosen sample of the group with the mixup result
            batch_images[sub_indice] = image

            if len(class_labels.shape) < 2: class_labels = class_labels[:, None]

            image_id = sub_indice * np.ones(shape=(boxes.shape[0], 1))

            label = np.concatenate([image_id, class_labels, boxes], axis=-1)

            batch_labels[sub_indice] = label

        batch_images = np.stack(batch_images)
        batch_labels = np.concatenate(batch_labels, axis=0)

        return batch_images, batch_labels

    def random(self):
        """Reshuffle the sample order in place, keeping images/labels aligned."""
        random.shuffle(self.indices)
        self.images_paths = [self.raw_images_paths[i] for i in self.indices]
        self.labels_paths = [self.raw_labels_paths[i] for i in self.indices]

    def on_epoch_end(self):
        if self.shuffle: self.random()