# coding: utf-8
from scipy import misc
import numpy as np
import skimage.util as su
from skimage import exposure
import datetime
import time
import os
import cv2
import math
from matplotlib import pyplot as plt
import random
plt.switch_backend('agg')

from keras import utils
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img


###################################
###  Object detection: image augmentation  ###
###################################

def show_pic(img, bboxes=None):
    '''
    Draw bounding boxes on an image and display it in an OpenCV window.

    Args:
        img: image array (cv2-style numpy array, BGR channel order assumed).
        bboxes: list of boxes [[x_min, y_min, x_max, y_max], ...];
            None is treated as "no boxes" (the original crashed on None).
    '''
    # Convert in memory instead of round-tripping through a hard-coded
    # temp file on disk (the original failed on machines without F:\ and
    # leaked the file if imshow raised). np.clip+astype mirrors what
    # imwrite/imread did to float images, and also copies, so the
    # caller's array is never drawn on.
    img = np.clip(img, 0, 255).astype(np.uint8)
    for bbox in (bboxes or []):
        x_min, y_min, x_max, y_max = bbox
        cv2.rectangle(img, (int(x_min), int(y_min)), (int(x_max), int(y_max)), (0, 255, 0), 3)
    cv2.namedWindow('pic', 0)  # 0 = resizable window
    cv2.moveWindow('pic', 0, 0)
    cv2.resizeWindow('pic', 1200, 800)  # displayed window size
    cv2.imshow('pic', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# All images below are cv2-read numpy arrays (H x W x C, BGR order)
class DataAugmentForObjectDetection():
    """Data-augmentation toolbox for object detection.

    Every transform takes a cv2-style image (numpy array, H x W x C) plus
    its bounding boxes as [[x_min, y_min, x_max, y_max], ...] and returns
    the transformed image together with correspondingly adjusted boxes
    (except cutout/_changeLight, which leave the boxes untouched).
    """

    def __init__(self, rotation_rate=0.5, max_rotation_angle=5,
                 crop_rate=0.5, shift_rate=0.5, change_light_rate=0.5,
                 add_noise_rate=0.5, flip_rate=0.5,
                 cutout_rate=0.5, cut_out_length=50, cut_out_holes=1, cut_out_threshold=0.5):
        # Per-augmentation probabilities and cutout parameters; stored for
        # callers that drive the augmentation pipeline externally.
        self.rotation_rate = rotation_rate
        self.max_rotation_angle = max_rotation_angle
        self.crop_rate = crop_rate
        self.shift_rate = shift_rate
        self.change_light_rate = change_light_rate
        self.add_noise_rate = add_noise_rate
        self.flip_rate = flip_rate
        self.cutout_rate = cutout_rate
        self.cut_out_length = cut_out_length
        self.cut_out_holes = cut_out_holes
        self.cut_out_threshold = cut_out_threshold

    # Random crop
    def crop_img_bboxes(self, img, bboxes):
        """Crop the image to a random window that still contains all boxes.

        Returns (crop_img, crop_bboxes) with box coordinates translated
        into the cropped frame.
        """
        # BUG FIX: img.shape is (height, width, ...); the original
        # unpacked it as (w, h), which mis-clamped the crop window on
        # non-square images.
        h, w = img.shape[:2]
        # Smallest rectangle that contains every box.
        x_min, y_min, x_max, y_max = w, h, 0, 0
        for bbox in bboxes:
            x_min = min(bbox[0], x_min)
            y_min = min(bbox[1], y_min)
            x_max = max(bbox[2], x_max)
            y_max = max(bbox[3], y_max)
        d_to_left = x_min
        d_to_right = w - x_max
        d_to_up = y_min
        d_to_bottom = h - y_max
        # Randomly expand that rectangle towards the image borders.
        crop_x_min = int(x_min - random.uniform(0, d_to_left))
        crop_y_min = int(y_min - random.uniform(0, d_to_up))
        crop_x_max = int(x_max + random.uniform(0, d_to_right))
        crop_y_max = int(y_max + random.uniform(0, d_to_bottom))
        # Clamp to the image bounds.
        crop_x_min = max(0, crop_x_min)
        crop_y_min = max(0, crop_y_min)
        crop_x_max = min(w, crop_x_max)
        crop_y_max = min(h, crop_y_max)
        # Translate boxes into the cropped coordinate frame.
        crop_bboxes = [[bbox[0] - crop_x_min, bbox[1] - crop_y_min,
                        bbox[2] - crop_x_min, bbox[3] - crop_y_min]
                       for bbox in bboxes]
        crop_img = img[crop_y_min:crop_y_max, crop_x_min:crop_x_max]
        return crop_img, crop_bboxes

    # Rotation
    def rotate_img_bboxes(self, img, bboxes, angle=5, scale=1.):
        """Rotate the image by `angle` degrees (with `scale`) and the boxes.

        The output canvas is enlarged so the rotated image is not clipped.
        Each box is re-derived as the axis-aligned bounding rect of its
        four rotated edge midpoints.
        """
        # BUG FIX: shape[0] is the height and shape[1] the width; the
        # original swapped them, producing a wrongly sized canvas for
        # non-square images.
        h, w = img.shape[:2]
        rangle = np.deg2rad(angle)  # degrees -> radians
        # New canvas size that fully contains the rotated image.
        nw = (abs(np.sin(rangle) * h) + abs(np.cos(rangle) * w)) * scale
        nh = (abs(np.cos(rangle) * h) + abs(np.sin(rangle) * w)) * scale
        # Rotate about the new canvas centre, then shift so the old image
        # centre maps onto the new one.
        rot_mat = cv2.getRotationMatrix2D((nw * 0.5, nh * 0.5), angle, scale)
        rot_move = np.dot(rot_mat, np.array([(nw - w) * 0.5, (nh - h) * 0.5, 0]))
        rot_mat[0, 2] += rot_move[0]
        rot_mat[1, 2] += rot_move[1]
        rot_img = cv2.warpAffine(img, rot_mat,
                                 (int(math.ceil(nw)), int(math.ceil(nh))),
                                 flags=cv2.INTER_LANCZOS4)
        # ---------------- correct the bbox coordinates ----------------
        rot_bboxes = list()
        for bbox in bboxes:
            xmin, ymin, xmax, ymax = bbox[0], bbox[1], bbox[2], bbox[3]
            # Midpoints of the four box edges, mapped through rot_mat.
            point1 = np.dot(rot_mat, np.array([(xmin + xmax) / 2, ymin, 1]))
            point2 = np.dot(rot_mat, np.array([xmax, (ymin + ymax) / 2, 1]))
            point3 = np.dot(rot_mat, np.array([(xmin + xmax) / 2, ymax, 1]))
            point4 = np.dot(rot_mat, np.array([xmin, (ymin + ymax) / 2, 1]))
            concat = np.vstack([point1, point2, point3, point4]).astype(np.int32)
            # Axis-aligned bounding rect of the rotated midpoints.
            rx, ry, rw, rh = cv2.boundingRect(concat)
            rot_bboxes.append([rx, ry, rx + rw, ry + rh])
        return rot_img, rot_bboxes

    # Translation
    def shift_pic_bboxes(self, img, bboxes):
        """Translate the image by a random offset that keeps all boxes inside.

        Returns (shift_img, shift_bboxes); the boxes are shifted by the
        same (possibly fractional) offset as the image.
        """
        w = img.shape[1]
        h = img.shape[0]
        # Smallest rectangle containing every box.
        x_min, y_min, x_max, y_max = w, h, 0, 0
        for bbox in bboxes:
            x_min = min(x_min, bbox[0])
            y_min = min(y_min, bbox[1])
            x_max = max(x_max, bbox[2])
            y_max = max(y_max, bbox[3])

        d_to_left = x_min          # max distance the content can move left
        d_to_right = w - x_max     # max distance right
        d_to_top = y_min           # max distance up
        d_to_bottom = h - y_max    # max distance down

        # Use only a third of the available room so boxes stay well inside.
        x = random.uniform(-(d_to_left - 1) / 3, (d_to_right - 1) / 3)
        y = random.uniform(-(d_to_top - 1) / 3, (d_to_bottom - 1) / 3)

        # Affine translation: positive x moves right, positive y moves down.
        M = np.float32([[1, 0, x], [0, 1, y]])
        shift_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))

        shift_bboxes = [[bbox[0] + x, bbox[1] + y, bbox[2] + x, bbox[3] + y]
                        for bbox in bboxes]
        return shift_img, shift_bboxes

    # Mirror flip
    def h_filp_pic_bboxes(self, img, bboxes):
        """Flip the image (50% horizontal, 50% vertical) and mirror the boxes."""
        # BUG FIX: the original tested `0.3 < 0.5`, which is always True,
        # so the vertical-flip branch was unreachable.
        horizon = random.random() < 0.5
        h, w, _ = img.shape
        # cv2.flip flipCode: 1 = horizontal (around y axis), 0 = vertical.
        flip_img = cv2.flip(img, 1 if horizon else 0)

        # ---------------- adjust the bounding boxes ----------------
        flip_bboxes = list()
        for box in bboxes:
            x_min, y_min, x_max, y_max = box[0], box[1], box[2], box[3]
            if horizon:
                flip_bboxes.append([w - x_max, y_min, w - x_min, y_max])
            else:
                flip_bboxes.append([x_min, h - y_max, x_max, h - y_min])
        return flip_img, flip_bboxes

    # Cutout regularization: zero random square patches that do not cover
    # the ground-truth boxes too much.
    def cutout(self, img, bboxes, length=100, n_holes=1, threshold=0.5):
        """Mask out `n_holes` square patches of side `length`.

        A candidate patch is re-drawn while it covers more than
        `threshold` of any ground-truth box; coverage is measured as
        intersection / box area (deliberately NOT a symmetric IoU).
        Returns the masked image (float, due to the mask multiplication);
        the boxes themselves are not modified.
        """
        def cal_coverage(boxA, boxB):
            # Fraction of boxB (ground-truth box) covered by boxA (patch).
            xA = max(boxA[0], boxB[0])
            yA = max(boxA[1], boxB[1])
            xB = min(boxA[2], boxB[2])
            yB = min(boxA[3], boxB[3])
            if xB <= xA or yB <= yA:
                return 0.0
            interArea = (xB - xA + 1) * (yB - yA + 1)
            boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
            return interArea / boxBArea

        if img.ndim == 3:
            h, w, c = img.shape
        else:
            # assumes a batched 4-D array; plain 2-D grayscale is not
            # handled here — NOTE(review): confirm the intended input ranks
            _, h, w, c = img.shape
        mask = np.ones((h, w, c), np.float32)
        for _ in range(n_holes):
            overlapping = True
            # NOTE: re-draws until a placement clears the threshold; this
            # can spin if the boxes cover (almost) the whole image.
            while overlapping:
                y = np.random.randint(h)
                x = np.random.randint(w)
                y1 = np.clip(y - length // 2, 0, h)
                y2 = np.clip(y + length // 2, 0, h)
                x1 = np.clip(x - length // 2, 0, w)
                x2 = np.clip(x + length // 2, 0, w)
                overlapping = False
                for box in bboxes:
                    if cal_coverage([x1, y1, x2, y2], box) > threshold:
                        overlapping = True
                        break
            mask[y1:y2, x1:x2, :] = 0.
        return img * mask

    # Brightness adjustment
    def _changeLight(self, img):
        """Gamma-adjust brightness: gamma > 1 darkens, gamma < 1 brightens."""
        gamma = random.uniform(0.5, 1.5)
        return exposure.adjust_gamma(img, gamma)

def _demo():
    """Run one cutout augmentation on a sample VOC image and display it."""
    # NOTE(review): hard-coded Windows path — adjust for your environment.
    image = r"F:\VOCtrainval_11-May-2012\VOCdevkit\VOC2012\JPEGImages\2007_000925.jpg"
    bboxes = [[29, 101, 216, 343], [309, 105, 460, 333]]
    img = cv2.imread(image)
    da = DataAugmentForObjectDetection()
    # Other augmentations to try:
    # img, bboxes = da.rotate_img_bboxes(img, bboxes)
    # img, bboxes = da.crop_img_bboxes(img, bboxes)
    # img, bboxes = da.shift_pic_bboxes(img, bboxes)
    # img, bboxes = da.h_filp_pic_bboxes(img, bboxes)
    img = da.cutout(img, bboxes, length=50)
    show_pic(img, bboxes)


# Guard so the demo no longer runs as a side effect of importing this module.
if __name__ == '__main__':
    _demo()


###################################
###  Image enhancement classes  ###
###################################
# 颜色变换类
class ColorTransformer:
    def __init__(self,ddir):
        pass


