# -*- coding: utf-8 -*-
"""
Created on Tue Oct  8 09:16:52 2019

@author: swls
"""

import os
import json
import sys
import numpy as np
import cv2
import re
import imageio
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.polys import Polygon, PolygonsOnImage

from data_enhance import DataEnhance



def rotate_enhance(image, polygon):
    """Pad *image* with a 500 px constant border, shift the first polygon by
    the same offset, then apply one random affine transform to both.

    Parameters
    ----------
    image : np.ndarray
        Source image (H, W[, C]).
    polygon : list
        List whose first element is an ``ia.Polygon``; only ``polygon[0]``
        is used.

    Returns
    -------
    tuple(list, list)
        One-element list of augmented images and the matching list of
        polygon lists, as produced by calling the ``iaa.Sequential``.
    """
    # BUG FIX: in OpenCV's Python binding the 7th positional argument of
    # copyMakeBorder is ``dst``, not the fill colour — passing 255 there
    # raises a TypeError. The border colour must go in ``value=``.
    # copyMakeBorder also allocates a fresh output array, so the previous
    # defensive ``image.copy()`` was redundant and has been dropped.
    padded = cv2.copyMakeBorder(image, 500, 500, 500, 500,
                                cv2.BORDER_CONSTANT, value=255)
    # Translate the polygon by the same (500, 500) offset as the padding.
    shifted = [ia.Polygon([(x + 500, y + 500)
                           for x, y in zip(polygon[0].xx, polygon[0].yy)])]
    seq = iaa.Sequential([
            iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},  # scale to 80-120%, per axis
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},  # +/-20% shift
            rotate=(-45, 45),  # rotate by -45 to +45 degrees
            shear=(-16, 16),   # shear by -16 to +16 degrees
            order=[0, 1],      # nearest-neighbour or bilinear interpolation (fast)
            cval=(0, 255),     # fill value used when mode is constant
            mode=ia.ALL        # any of scikit-image's warping modes
    )])
    aug_images, aug_polygons = seq(images=[padded], polygons=[shifted])
    return aug_images, aug_polygons

def enhance_dataset(data_dir, save_dir):
    """Compose new samples by pasting randomly rotated "text" regions taken
    from other images into each image, then dump the resulting annotations
    as ``via_region_data_ori.json`` in *save_dir*."""
    with open(data_dir + os.sep + "via_region_data.json", "r", encoding="utf-8") as rf:
        data_dict = json.loads(rf.read())

    # One DataEnhance wrapper per annotated image; print keys for progress.
    data_enhances = []
    for key in data_dict:
        print(key)
        data_enhances.append(DataEnhance(data_dir, data_dict[key]))

    # Pool of donor-image indices, shuffled (twice, as before) for variety.
    random_list = list(range(len(data_enhances)))
    np.random.shuffle(random_list)
    np.random.shuffle(random_list)

    for target in range(len(data_enhances)):
        paste_count = np.random.randint(5, 20)

        donors = np.random.choice(random_list, paste_count)
        for donor in donors:
            region_idx = np.random.randint(data_enhances[donor].object_count)
            # Only "text" regions get transplanted.
            if data_enhances[donor].label_info[region_idx] != "text":
                continue
            points = [(px, py)
                      for px, py in data_enhances[donor].region_info[region_idx]]
            src_polygon = [ia.Polygon(points)]
            src_image = data_enhances[donor].ori_image.copy()

            aug_images, aug_polygons = rotate_enhance(src_image, src_polygon)
            aug_points = np.array(
                [[int(px), int(py)]
                 for px, py in zip(aug_polygons[0][0].xx, aug_polygons[0][0].yy)])
            data_enhances[target].random_add_region(
                aug_images[0],
                data_enhances[donor].label_info[region_idx],
                aug_points)

    # Persist every composed image + its annotation entry.
    total_data = dict()
    for idx, de in enumerate(data_enhances):
        save_path = save_dir + os.sep + str(idx) + "_first.jpg"
        record = de.convert_to_dict(save_path)
        total_data[record["filename"]] = record
    with open(save_dir + os.sep + "via_region_data_ori.json", "w") as wf:
        wf.write(json.dumps(total_data))

def convert_to_via(data_ori_path, via_path):
    """Merge file sizes from a VIA export back into our annotation dict.

    VIA keys its entries as ``"<filename><filesize>"`` (e.g. ``a.jpg123``).
    For every such key in *via_path*, the matching entry from
    *data_ori_path* gets a ``"size"`` field and is stored under the VIA key.
    The merged dict is written as ``via_region_data.json`` next to
    *data_ori_path*.

    Parameters
    ----------
    data_ori_path : str
        Path to our ``via_region_data_ori.json`` (keys are bare filenames).
    via_path : str
        Path to the VIA-exported json (keys are filename+size).
    """
    with open(data_ori_path, "r", encoding="utf-8") as rf:
        data_dict = json.loads(rf.read())

    with open(via_path, "r", encoding="utf-8") as rf:
        via_dict = json.loads(rf.read())

    save_dict = dict()
    # "<name>.jpg<digits>" -> (filename, size); size is the trailing digits.
    reg = re.compile(r"(.*jpg)(\d+)")
    for key in via_dict.keys():
        outs = reg.findall(key)
        if len(outs) == 0:
            # Fixed typo in message (was "con't match re"); include the key
            # so the unmatched entry can be found.
            print("can't match re:", key)
            continue
        filename = outs[0][0]
        size = int(outs[0][1])
        if filename not in data_dict:
            # VIA export references an image we have no annotation for;
            # skip instead of crashing with KeyError.
            print("no annotation entry for:", filename)
            continue
        data_dict[filename]["size"] = size
        save_dict[filename + str(size)] = data_dict[filename]

    out_path = os.path.dirname(data_ori_path) + os.sep + "via_region_data.json"
    with open(out_path, "w", encoding="utf-8") as wf:
        wf.write(json.dumps(save_dict))




class ImageInfo:
    """Labels and polygon outlines parsed from one VIA annotation entry."""

    def __init__(self, data_dict):
        # data_dict is one VIA entry: {"filename": ..., "regions": [...]}.
        self.image_name = data_dict["filename"]
        self.data_label = []
        self.data_polygon = []
        self.read_data(data_dict)

    def read_data(self, data_dict):
        """Populate data_polygon / data_label from the entry's region list."""
        for region in data_dict["regions"]:
            shape = region["shape_attributes"]
            # Pair up the parallel x/y coordinate lists into (x, y) tuples.
            points = list(zip(shape["all_points_x"], shape["all_points_y"]))
            self.data_polygon.append(points)
            self.data_label.append(region["region_attributes"]["type"])

    def get_dict(self, polygons):
        """Serialize *polygons* (objects exposing .xx/.yy coordinate
        sequences) back into a VIA entry using this image's stored labels."""
        current_dict = dict()
        current_dict["filename"] = self.image_name

        regions = []
        for idx, plg in enumerate(polygons):
            regions.append({
                "shape_attributes": {
                    "name": 'polygon',
                    "all_points_x": [int(x) for x in plg.xx],
                    "all_points_y": [int(y) for y in plg.yy],
                },
                "region_attributes": {"type": self.data_label[idx]},
            })

        current_dict["regions"] = regions
        current_dict["file_attributes"] = {}
        return current_dict

class ImageAug:
    """Applies a random flip to every image listed in a directory's
    ``via_region_data_ori.json`` and writes the flipped images plus the
    matching transformed polygon annotations to ``<data_dir>_imgaug_flip``."""

    def __init__(self, data_dir):
        self.data_dir = data_dir
        self.image_infos = []
        self.read_json_to_imageinfo(data_dir)

    def read_json_to_imageinfo(self, data_dir):
        """Parse the annotation json into one ImageInfo per image."""
        with open(data_dir + os.sep + "via_region_data_ori.json", "r") as rf:
            data_dict = json.loads(rf.read())
        for entry in data_dict.values():
            self.image_infos.append(ImageInfo(entry))

    def save_json_and_save_img(self, save_dir, image_aug, polygon_aug):
        """Write each augmented image to *save_dir* and dump all annotation
        entries as a fresh ``via_region_data_ori.json``."""
        via_dict = dict()
        for info, image, polygons in zip(self.image_infos, image_aug, polygon_aug):
            imageio.imwrite(save_dir + os.sep + info.image_name, image)
            via_dict[info.image_name] = info.get_dict(polygons)

        with open(save_dir + os.sep + "via_region_data_ori.json", "w") as wf:
            wf.write(json.dumps(via_dict))

    def aug_image(self):
        """Load every image and its polygons, run them through the flip
        pipeline, and return (augmented_images, augmented_polygons)."""
        io_images = []
        polygons = []
        for info in self.image_infos:
            io_images.append(imageio.imread(self.data_dir + os.sep + info.image_name))
            polygons.append([ia.Polygon(xy) for xy in info.data_polygon])

        # Pipeline: either exactly one flip (vertical OR horizontal), or
        # one-to-two flips applied in random order.
        seq = iaa.Sequential(
            [
                iaa.OneOf([
                    iaa.OneOf([
                        iaa.Flipud(1),
                        iaa.Fliplr(1),
                    ]),
                    iaa.SomeOf((1, 2),
                               [
                                   iaa.Flipud(1),
                                   iaa.Fliplr(1),
                               ],
                               random_order=True),
                ]),
            ],
            random_order=True,
        )

        return seq(images=io_images, polygons=polygons)

    def run(self):
        """Augment every image and save the results into a sibling
        ``<data_dir>_imgaug_flip`` directory (created if missing)."""
        image_aug, polygon_aug = self.aug_image()

        new_save_dir = self.data_dir + "_imgaug_flip"
        if not os.path.exists(new_save_dir):
            os.mkdir(new_save_dir)
        self.save_json_and_save_img(new_save_dir, image_aug, polygon_aug)



if __name__ == "__main__":
    # 1
    # data_dir = r"C:\Users\swls\Desktop\being\2019-10-14\total"
    # save_dir = data_dir + "_enhance"
    # os.makedirs(save_dir, exist_ok=True)
    # enhance_dataset(data_dir, save_dir)

    # 2
    # data_ori_path = r"C:\Users\swls\Desktop\being\2019-10-14\label_enhance\via_region_data_ori.json"
    # via_path = r"C:\Users\swls\Desktop\being\2019-10-14\label_enhance\via_region_data_size.json"
    # convert_to_via(data_ori_path, via_path)

    # 3
    data_dir = r"C:\Users\swls\Downloads\enhance_data1"
    image_aug = ImageAug(data_dir)
    image_aug.run()
