# data_generator.py
# python 3.6.3
import os
import cv2 as cv
import numpy as np
from keras.utils import Sequence

from config import batch_size
from config import fg_path, a_path
from config import img_cols, img_rows, full_h, full_w
from config import input_mean, input_std

import random

def get_rgbd(name):
    """Load the 4-channel (RGB + extra channel) PNG for `name` from fg_path.

    Raises FileNotFoundError if the file is missing or unreadable,
    instead of letting the `.shape` access fail on None.
    """
    file = os.path.join(fg_path, name + '.PNG')
    im_rgbd = cv.imread(file, -1)  # -1: load image as stored, all channels
    if im_rgbd is None:
        # cv.imread signals failure by returning None, not by raising.
        raise FileNotFoundError('cannot read image: {}'.format(file))
    assert len(im_rgbd.shape) == 3 and im_rgbd.shape[2] == 4
    return im_rgbd


def get_rgb(name):
    """Load only the first three (RGB) channels of the 4-channel image.

    Delegates the load-and-validate step to get_rgbd instead of
    duplicating it.
    """
    return get_rgbd(name)[:, :, :3]


def get_alpha(name):
    """Load the alpha image for `name` from a_path as grayscale.

    Raises FileNotFoundError if the file is missing or unreadable.
    """
    file = os.path.join(a_path, name + '.PNG')
    im_alpha = cv.imread(file, 0)  # 0: force single-channel grayscale
    if im_alpha is None:
        # cv.imread signals failure by returning None, not by raising.
        raise FileNotFoundError('cannot read alpha: {}'.format(file))
    return im_alpha


def get_mask(name):
    """Binarize the alpha image for `name`: any value > 0 becomes 255.

    Delegates loading to get_alpha instead of duplicating the imread call.
    """
    im_alpha = get_alpha(name)
    _, mask = cv.threshold(im_alpha, 0, 255, cv.THRESH_BINARY)
    return mask


def random_offset(full_size, crop_size):
    """Return a uniformly random (x, y) top-left corner for a crop.

    full_size, crop_size: (width, height) pairs with full >= crop in
    both dimensions.  The offset is drawn over the whole valid range
    [0, full - crop].  The previous `randint(...) // 2` restricted
    crops to the top-left quadrant of the valid range and never reached
    the maximal offset, halving the augmentation coverage.
    """
    assert full_size[0] >= crop_size[0] and full_size[1] >= crop_size[1]
    offset = [0, 0]
    if full_size[0] > crop_size[0]:
        # +1 because randint's upper bound is exclusive; this makes the
        # maximal valid offset reachable.
        offset[0] = np.random.randint(full_size[0] - crop_size[0] + 1)
    if full_size[1] > crop_size[1]:
        offset[1] = np.random.randint(full_size[1] - crop_size[1] + 1)
    return tuple(offset)


def depth_random_scale_shift(x, y, max_val: float):
    """Apply one shared random scale then a random shift to depth maps x, y.

    Values are clipped to max_val at each step; pixels that were zero
    (treated as invalid) are forced back to zero via the masks so the
    shift does not invent depth where there was none.

    The scale is drawn from [0.2, max_val / y.max()] so the scaled y
    stays within [0, max_val]; the shift from [0, max_val - y.max()]
    for the same reason.
    """
    assert isinstance(x, np.ndarray)
    assert isinstance(y, np.ndarray)
    max_ = float(y.max())
    mask_x = x > 0
    mask_y = y > 0
    assert max_ <= max_val
    if max_ == 0.0:
        # y has no valid depth at all: the scale range below would divide
        # by zero.  Nothing sensible to augment -- return inputs unchanged.
        return x, y
    scale_ = 0.2 + np.random.rand() * (max_val / max_ - 0.2)
    x = np.minimum(x * scale_, max_val)
    y = np.minimum(y * scale_, max_val)
    max_ = y.max()
    shift_ = np.random.rand() * (max_val - max_)
    # Re-apply the original validity masks so zero pixels stay zero.
    x = np.minimum(x + shift_, max_val) * mask_x
    y = np.minimum(y + shift_, max_val) * mask_y
    return x, y


def normalize_input(x):
    """Scale an 8-bit image into [0, 1], then standardize with the
    dataset mean/std from config."""
    assert isinstance(x, np.ndarray)
    scaled = x.astype(np.float32) / 255.0
    return (scaled - input_mean) / input_std


def denormalize_input(x):
    """Invert normalize_input: de-standardize, rescale to [0, 255],
    and cast to uint8."""
    assert isinstance(x, np.ndarray)
    restored = (x * input_std + input_mean) * 255
    return restored.astype(np.uint8)


def normalize_output(y):
    """Map 8-bit target values into float32 [0, 1]."""
    assert isinstance(y, np.ndarray)
    return y.astype(np.float32) / 255.0


def denormalize_output(y):
    """Clip to [0, 1] and map back to uint8 [0, 255]."""
    assert isinstance(y, np.ndarray)
    clipped = np.clip(y, 0.0, 1.0)
    return (clipped * 255).astype(np.uint8)


class DataGenSequence(Sequence):
    """Keras Sequence yielding (rgb, mask) batches for training/validation.

    Image names are read from '{usage}_names.txt'.  For each sample:
    load the RGB image and binary mask, resize to (full_w, full_h),
    take a random (img_cols, img_rows) crop, apply a random multiple of
    90-degree rotation, and normalize.
    """

    def __init__(self, usage):
        # usage: e.g. 'train' or 'valid' -- selects the name-list file.
        self.usage = usage

        filename = '{}_names.txt'.format(usage)
        with open(filename, 'r') as f:
            self.names = f.read().splitlines()

        np.random.shuffle(self.names)

    def __len__(self):
        # Number of complete batches; a trailing partial batch is dropped.
        return len(self.names) // batch_size

    def __getitem__(self, idx):
        batch_x = np.empty((batch_size, img_rows, img_cols, 3), dtype=np.float32)
        batch_y = np.empty((batch_size, img_rows, img_cols, 1), dtype=np.float32)

        for i_batch in range(batch_size):
            name = self.names[idx * batch_size + i_batch]
            rgb = get_rgb(name)
            mask = get_mask(name)

            # The rot90 augmentation below assumes a square crop.
            assert img_rows == img_cols
            crop_size = (img_cols, img_rows)
            full_size = (full_w, full_h)

            rgb = cv.resize(rgb, full_size)
            mask = cv.resize(mask, full_size)

            x, y = random_offset(full_size, crop_size)
            rgb = rgb[y:y + crop_size[1], x:x + crop_size[0]]
            mask = mask[y:y + crop_size[1], x:x + crop_size[0]]

            # Rotate both images by a random multiple of 90 degrees
            # (0/90/180/270) -- this is rotation, not flipping.
            rot_id = np.random.randint(4)
            rgb = np.rot90(rgb, rot_id)
            mask = np.rot90(mask, rot_id)

            batch_x[i_batch, :, :, :] = normalize_input(rgb)
            batch_y[i_batch, :, :, 0] = normalize_output(mask)

        # Fail loudly on corrupt inputs by raising, instead of killing the
        # whole process with exit(-1) from inside library code.
        if np.any(np.isinf(batch_x)):
            raise ValueError('INPUT: inf values found in batch {}'.format(idx))
        if np.any(np.isnan(batch_x)):
            raise ValueError('INPUT: nan values found in batch {}'.format(idx))
        return batch_x, batch_y

    def on_epoch_end(self):
        # Reshuffle so every epoch sees the samples in a new batch order.
        np.random.shuffle(self.names)


def train_gen():
    """Build the batch generator over the training split."""
    return DataGenSequence('train')


def valid_gen():
    """Build the batch generator over the validation split."""
    return DataGenSequence('valid')


def GetRandomSamples(类别, 个数):
    """Draw `个数` (count) random normalized sample crops from the
    `类别` (usage: 'train' or 'valid') name list.

    Returns (inputs, targets, names): inputs is a stacked batch of
    normalized RGB crops, targets the matching normalized mask crops
    (with a trailing channel axis), names the selected file names.
    """
    usage, count = 类别, 个数
    crop_size = (img_cols, img_rows)
    full_size = (full_w, full_h)

    list_path = '{}_names.txt'.format(usage)
    with open(list_path, 'r') as f:
        all_names = f.read().splitlines()
    assert len(all_names) >= count
    # Pick `count` distinct indices without replacement.
    names = [all_names[i] for i in random.sample(list(range(len(all_names))), count)]

    inputs = [None] * count
    targets = [None] * count
    for k in range(count):
        rgb = get_rgb(names[k])
        mask = get_mask(names[k])
        rgb = cv.resize(rgb, full_size)
        mask = cv.resize(mask, full_size)
        mask = np.expand_dims(mask, -1)  # add channel axis to match model output
        rgb = normalize_input(rgb)
        mask = normalize_output(mask)
        left, top = random_offset(full_size, crop_size)
        inputs[k] = rgb[top:top + crop_size[1], left:left + crop_size[0]]
        targets[k] = mask[top:top + crop_size[1], left:left + crop_size[0]]
    return np.stack(inputs, axis=0), np.stack(targets, axis=0), names