# -*-coding:utf-8-*-
import tensorflow as tf
import numpy as np
import os
import math
import time


@tf.function
def transform_targets_for_output(y_true, grid_h, grid_w, anchor_idxs):
    """Scatter ground-truth boxes onto a single YOLO output grid.

    Runs as a traced graph: autograph converts the Python loops and ifs
    below, scatter entries are accumulated in TensorArrays, and everything
    is applied in one tensor_scatter_nd_update at the end.

    Args:
        y_true: (N, boxes, (class, x, y, w, h, best_anchor)) float tensor.
            x/y/w/h are presumably normalized to [0, 1] — the grid cell is
            located by multiplying x/y by the grid size (TODO confirm with
            the annotation format). Padded box slots are all zeros.
        grid_h: output grid height for this scale (Python int).
        grid_w: output grid width for this scale (Python int).
        anchor_idxs: anchor indices owned by this output scale.

    Returns:
        Tensor of shape (N, grid_h, grid_w, len(anchor_idxs), 6) laid out
        as [x, y, w, h, obj, class]; cells without a box stay zero.
    """
    N = tf.shape(y_true)[0]

    # y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
    y_true_out = tf.zeros(
        (N, int(grid_h), int(grid_w), tf.shape(anchor_idxs)[0], 6))

    anchor_idxs = tf.cast(anchor_idxs, tf.int32)

    # Dynamic-size TensorArrays collect one (index, update) pair per box
    # that lands on this scale.
    indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
    updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
    idx = 0
    # (w, h) order here because box_xy is (x, y).
    grid_size = tf.cast([int(grid_w), int(grid_h)], dtype=tf.float32)

    for i in tf.range(N):
        for j in tf.range(tf.shape(y_true)[1]):
            # Skip zero-padded box slots (a real box's y coordinate is
            # assumed non-zero; padded rows are all zeros).
            if tf.equal(y_true[i][j][2], 0):
                continue
            # Does this box's best anchor belong to this output scale?
            anchor_eq = tf.equal(
                anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))

            if tf.reduce_any(anchor_eq):
                box = y_true[i][j][1:5]
                box_xy = y_true[i][j][1:3]

                anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
                # grid_xy = tf.cast([box_xy[0] // (1/grid_w), box_xy[1] // (1/grid_h)], tf.int32)
                # Grid cell holding the box center: floor(xy * grid_size).
                grid_xy = tf.cast(tf.multiply(box_xy, grid_size), tf.int32)

                # Scatter index order is (batch, row=y, col=x, anchor slot).
                indexes = indexes.write(
                    idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
                updates = updates.write(
                    idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][0]])
                idx += 1

    # Apply all accumulated entries onto the zero grid in one shot.
    return tf.tensor_scatter_nd_update(
        y_true_out, indexes.stack(), updates.stack())


'''
    anchors: sorted ascending, smallest to largest
    anchor_masks: e.g. [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
'''


class Dataset(object):
    """tf.data input pipeline for a multi-scale YOLO-style detector.

    Loads image annotations and anchors from .npy files, splits them into
    train/validation lists, and builds batched datasets that yield
    (image, (y_out_0, ..., y_out_{n_layer-1})) tuples, one target tensor
    per output scale.
    """

    def __init__(self, image_ann_path, anchors_path,
                 batch_size, class_num, in_hw, max_boxes=100, validation_split=0.1):
        """
        Args:
            image_ann_path: .npy file of (img_path, true_box, _) records.
            anchors_path: .npy file of anchors, shape (n_layer, n_anchor, 2).
            batch_size: number of images per batch.
            class_num: number of object classes.
            in_hw: network input (height, width); assumed divisible by 32.
            max_boxes: per-image box lists are zero-padded to this length.
            validation_split: fraction of samples held out for validation.

        Raises:
            FileNotFoundError: if either input file does not exist.
        """
        self.in_hw = np.array(in_hw)
        self.batch_size = batch_size
        self.class_num = class_num
        self.validation_split = validation_split
        self.max_boxes = max_boxes

        if not os.path.exists(image_ann_path):
            raise FileNotFoundError('image_ann_path: %s does not exist' % image_ann_path)
        self.image_ann = np.load(image_ann_path, allow_pickle=True)
        np.random.shuffle(self.image_ann)
        self.train_num = int(len(self.image_ann) * (1 - validation_split))
        self.valid_num = len(self.image_ann) - self.train_num
        self.train_list = self.image_ann[:self.train_num]
        self.valid_list = self.image_ann[self.train_num:]

        if not os.path.exists(anchors_path):
            raise FileNotFoundError('anchors_path %s does not exist' % anchors_path)
        self.anchors = np.load(anchors_path, allow_pickle=True)
        self.n_layer = self.anchors.shape[0]
        self.n_anchor = self.anchors.shape[1]
        # Layer 0 is the coarsest grid and owns the largest anchors, e.g.
        # 3 layers x 3 anchors -> [[6, 7, 8], [3, 4, 5], [0, 1, 2]].
        # (np.int was removed in NumPy 1.24; plain int keeps the same dtype.)
        self.anchor_masks = np.zeros(shape=(self.n_layer, self.n_anchor)).astype(int)
        for l in range(self.n_layer):
            for a in range(self.n_anchor):
                self.anchor_masks[l][a] = (self.n_layer - (l + 1)) * self.n_anchor + a
        self.anchors = np.reshape(self.anchors, newshape=(-1, 2))
        # Sort ascending by width so the masks index small -> large anchors.
        self.anchors = np.array(sorted(self.anchors, key=lambda x: (x[0])))
        # Output grid sizes: stride-32 grid for layer 0, doubling each layer.
        self.out_hw = np.array([self.in_hw // 32 * math.pow(2, i)
                                for i in range(self.n_layer)]).astype(int)
        print('Model output size:')
        print(self.out_hw)

    def create_dataset(self, image_ann, trainning):
        """Build an infinite, shuffled, batched dataset from annotations.

        Args:
            image_ann: iterable of (img_path, true_box, _) records.
            trainning: True for the training split (augmentation hook).

        Returns:
            A tf.data.Dataset yielding (image_batch, (y_out per scale)).
        """
        def gen():
            # Loop forever so the dataset never exhausts mid-epoch.
            while True:
                for img_path, true_box, _ in image_ann:
                    # NOTE use copy to avoid mutating the annotation array!
                    yield str(img_path), np.copy(true_box)

        def preprocess(img_path, bbox_npy):
            # Decode (jpeg or png), resize to the network input size, and
            # zero-pad the box list to a fixed length so batching works.
            x = tf.io.read_file(img_path)
            x = tf.cond(
                tf.image.is_jpeg(x),
                lambda: tf.image.decode_jpeg(x, channels=3),
                lambda: tf.image.decode_png(x, channels=3)
            )
            x = tf.image.resize(x, (self.in_hw[0], self.in_hw[1]))
            y = tf.convert_to_tensor(bbox_npy)

            paddings = [[0, self.max_boxes - tf.shape(y)[0]], [0, 0]]
            y = tf.pad(y, paddings)

            return x, y

        def transform_image_label(x, y, anchors, anchor_masks, img_h, img_w):
            if trainning:
                # TODO: data augmentation for the training split.
                pass

            # Scale pixel values to [-1, 1] (imagenet_utils mode='tf').
            image = tf.keras.applications.imagenet_utils.preprocess_input(x, mode='tf')

            y_outs = []
            grid_h = img_h // 32
            grid_w = img_w // 32

            # Pick the best anchor for every box by IoU on (w, h) only —
            # boxes and anchors presumably share the same scale (TODO confirm).
            anchors = tf.cast(anchors, tf.float32)
            anchor_area = anchors[..., 0] * anchors[..., 1]
            box_wh = y[..., 3:5]
            box_wh = tf.tile(tf.expand_dims(box_wh, -2),
                             (1, 1, tf.shape(anchors)[0], 1))
            box_area = box_wh[..., 0] * box_wh[..., 1]
            intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * \
                           tf.minimum(box_wh[..., 1], anchors[..., 1])
            iou = intersection / (box_area + anchor_area - intersection)
            anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)
            anchor_idx = tf.expand_dims(anchor_idx, axis=-1)
            # Append the best-anchor index as a sixth column.
            y = tf.concat([y, anchor_idx], axis=-1)

            # One target tensor per output scale; the grid doubles each scale.
            for anchor_idxs in anchor_masks:
                y_outs.append(transform_targets_for_output(
                    y, grid_h, grid_w, anchor_idxs))
                grid_h *= 2
                grid_w *= 2

            return image, tuple(y_outs)

        shuffle_size = self.train_num if trainning else self.valid_num
        dataset = tf.data.Dataset.from_generator(gen, (tf.string, tf.float32), ([], [None, 5]))
        dataset = dataset.shuffle(shuffle_size, reshuffle_each_iteration=True)
        dataset = dataset.map(lambda x, y: (preprocess(x, y)), num_parallel_calls=tf.data.experimental.AUTOTUNE)
        # Images must already share one size after preprocess, otherwise
        # batching raises a shape error.
        dataset = dataset.batch(self.batch_size)
        dataset = dataset.map(lambda x, y: (
            transform_image_label(x, y, self.anchors, self.anchor_masks, self.in_hw[0], self.in_hw[1])
        ), num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

        return dataset

    def set_dataset(self):
        """Materialize train/valid datasets and per-epoch step counts."""
        self.train_dataset    = self.create_dataset(self.train_list, trainning=True)
        self.train_epoch_step = len(self.train_list) // self.batch_size
        self.valid_dataset    = self.create_dataset(self.valid_list, trainning=False)
        self.valid_epoch_step = len(self.valid_list) // self.batch_size


if __name__ == "__main__":
    # Smoke test: build the pipeline and time one epoch's worth of batches.
    image_ann_path = 'data/k210_img_ann.npy'
    anchors_path = 'data/k210_anchor.npy'
    class_num = 10
    in_hw = (224, 320)
    batch_size = 16

    ds = Dataset(image_ann_path, anchors_path, batch_size, class_num, in_hw)
    ds.set_dataset()

    num = ds.train_num // batch_size
    drawn = 0
    tic = time.time()
    for images, labels in ds.train_dataset:
        if drawn > num:
            break
        drawn += 1
        print('------ %d / %d' % (drawn, num))
    toc = time.time()
    print('dataset cost time ', toc - tic)
