# _*_ coding:utf-8 _*_
"""
__Author__    :  yuan
__Date__      :  2020/4/5
__File__      :  data.py
__Desc__      :
"""
import os
import tensorflow as tf
import random
import numpy as np
from keras import backend as K
# TODO:
# Target spatial size that enhance_image() resizes every image to (height, width).
H=900
W=900
# Number of classes; used as the depth of the one-hot labels in _parse_img().
numclass=2
def _get_data_list(img_root=r"E:\Data\ZResource\Datasets\animals\animals"):
	"""Scan one-level class directories under `img_root`.

	Each immediate subdirectory is one class; its index in sorted directory
	order becomes the integer label. Returns two parallel lists:
	(image_paths, labels).
	"""
	class_dirs = sorted(os.listdir(img_root))
	images, labels = [], []
	for label, dir_name in enumerate(class_dirs):
		full_dir = os.path.join(img_root, dir_name)
		for fname in os.listdir(full_dir):
			images.append(os.path.join(full_dir, fname))
			labels.append(label)
	return images, labels


def enhance_image(image: tf.Tensor, resize=(H, W)):
	"""Randomly augment a decoded image tensor, then resize it to `resize`.

	Each augmentation — brightness, contrast, horizontal flip, hue — is
	applied independently with probability 2/3 (random.randint(0, 2) is
	truthy for 1 and 2, drawn at graph-construction time).

	Args:
		image: decoded image tensor (assumes HWC with 3 channels, as
			produced by _parse_img — TODO confirm for other callers).
		resize: target (height, width) after augmentation.
	Returns:
		The augmented image resized to `resize`.
	"""
	img = tf.identity(image)
	if random.randint(0, 2):
		img = tf.image.adjust_brightness(img, delta=0.5)
	if random.randint(0, 2):
		img = tf.image.adjust_contrast(img, 0.5)
	if random.randint(0, 2):
		img = tf.image.flip_left_right(img)
	if random.randint(0, 2):
		# Bug fix: adjust hue of the already-augmented `img`, not the
		# original `image` — the previous code discarded any brightness/
		# contrast/flip augmentation whenever the hue branch was taken.
		img = tf.image.adjust_hue(img, 0.2)
	img = tf.image.resize_images(img, size=tf.constant(resize, dtype=tf.int32))
	return img


def _parse_img(img, label):
	"""Dataset map fn: load one JPEG, augment/resize it, scale pixels to
	[0, 1], and one-hot encode the integer label to `numclass` classes."""
	raw = tf.io.read_file(img)
	decoded = tf.image.decode_jpeg(raw, channels=3)
	augmented = enhance_image(decoded)
	scaled = augmented / 255.
	onehot = tf.one_hot(label, numclass)
	return scaled, onehot

def mixup(x1, y1, x2, y2, alpha):
	"""Blend two samples and their labels with a random convex weight.

	The mixing weight is drawn from Beta(alpha, alpha + 1000).
	NOTE(review): canonical mixup (arXiv 1710.09412) draws from
	Beta(alpha, alpha); the extra +1000 here biases the weight heavily
	toward 0, so the output is dominated by (x2, y2) — confirm intentional.
	"""
	lam = np.random.beta(alpha, alpha + 1000)
	mixed_x = lam * x1 + (1 - lam) * x2
	mixed_y = lam * y1 + (1 - lam) * y2
	return mixed_x, mixed_y

# 用datasetapi:
# Build the input pipeline with the tf.data API.
def get_data_iter(img_root, iter=500, batch=16):
	"""Return a TF1 one-shot-iterator get_next op yielding shuffled
	(image, one-hot label) batches from the class folders under `img_root`.

	`iter` is the dataset repeat count and `batch` the batch size.
	"""
	images, labels = _get_data_list(img_root)
	pairs = list(zip(images, labels))
	random.shuffle(pairs)
	images, labels = zip(*pairs)
	image_t = tf.convert_to_tensor(list(images), tf.string)
	label_t = tf.convert_to_tensor(list(labels), tf.uint8)

	ds = tf.data.Dataset.from_tensor_slices((image_t, label_t))
	ds = ds.map(_parse_img)  # a .cache() could be chained after this map
	# TODO: apply mixup here with tf ops
	ds = ds.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=3000, count=iter))
	ds = ds.batch(batch)
	ds = ds.prefetch(200)
	return ds.make_one_shot_iterator().get_next()

def binary_focal_loss(y_true,y_pred,gamma=2, alpha=0.25):
	"""
	Binary form of focal loss (for two-class problems).

	focal_loss(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t)
		where p = sigmoid(x), p_t = p or 1 - p depending on if the label is 1 or 0, respectively.

	Args:
		y_true: ground-truth labels (cast to float32 internally).
		y_pred: predicted probabilities; presumably in (0, 1) — K.epsilon()
			only guards the p_t = 0 case.
		gamma: focusing parameter; larger values down-weight easy examples.
		alpha: weight applied to the positive class (1 - alpha to negatives).
	Returns:
		Scalar mean focal loss over all elements.
	References:
		https://arxiv.org/pdf/1708.02002.pdf
	Usage — note this function takes (y_true, y_pred) directly (it is NOT a
	factory returning a loss fn), so pass it as the loss itself:
	 model.compile(loss=binary_focal_loss, metrics=["accuracy"], optimizer=adam)
	"""
	alpha = tf.constant(alpha, dtype=tf.float32)
	gamma = tf.constant(gamma, dtype=tf.float32)
	y_true = tf.cast(y_true, tf.float32)
	# alpha_t = alpha where the label is 1, (1 - alpha) where it is 0.
	alpha_t = y_true * alpha + (K.ones_like(y_true) - y_true) * (1 - alpha)
	# p_t = predicted probability of the true class; epsilon keeps log(p_t) finite.
	p_t = y_true * y_pred + (K.ones_like(y_true) - y_true) * (K.ones_like(y_true) - y_pred) + K.epsilon()
	focal_loss = - alpha_t * K.pow((K.ones_like(y_true) - p_t), gamma) * K.log(p_t)
	return K.mean(focal_loss)


if __name__ == "__main__":
	# Smoke test: build the pipeline, pull one batch of 8 and print its
	# one-hot labels.
	img_root = r"E:\Data\ZResource\Datasets\animals\animals"
	next_batch = get_data_iter(img_root, iter=1, batch=8)
	with tf.Session() as sess:
		for _ in range(1):
			batch = sess.run(next_batch)
			print(batch[1])
