# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 17:08:30 2019

@author: swls
"""

import os
import os.path as osp
import re
import sys
import cv2
import json
from tqdm import tqdm
import shutil
import imageio
import numpy as np
from collections import defaultdict
from multiprocessing import Pool

import sys
sys.path.append('...\..')

import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.polys import Polygon, PolygonsOnImage


from tool import utils  # export PYTHONPATH=$PYTHONPATH:`pwd` 
from tool import opencv_tool  # export PYTHONPATH=$PYTHONPATH:`pwd` 
from tool import imgaug_tool  # export PYTHONPATH=$PYTHONPATH:`pwd` 
from tool import via_tool  # export PYTHONPATH=$PYTHONPATH:`pwd` 
from tool import filesystem, darknet_tool

# CCPD plate-code lookup tables: a CCPD file name encodes the plate number as
# integer indices into these lists — index 0 of the plate field selects from
# `provinces`, index 1 from `alphabets`, and the remaining characters from
# `ads`. 'O' appears to act as a placeholder / padding code — TODO confirm
# against the CCPD dataset documentation.
provinces = ["皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "O"]
alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'O']
ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']


def read_poloygon_json_to_list(data_dir):
    """Load polygon annotations from the VIA JSON file inside *data_dir*.

    Returns a dict mapping each image's full path to a list of polygons,
    where every polygon is a list of (x, y) tuples.
    """
    json_path = data_dir + os.sep + "via_region_data.json"
    with open(json_path, "r") as fh:
        annotations = json.load(fh)

    result = {}
    for entry in annotations.values():
        polygons = [
            list(zip(region["shape_attributes"]["all_points_x"],
                     region["shape_attributes"]["all_points_y"]))
            for region in entry["regions"]
        ]
        result[data_dir + os.sep + entry["filename"]] = polygons
    return result

def read_rect_json_to_list(data_dir):
    """Load rectangle annotations from the VIA JSON file inside *data_dir*.

    Returns a dict mapping each image's full path to a list of boxes in
    [x0, y0, x1, y1] form. Images with no regions are omitted.
    """
    json_path = data_dir + os.sep + "via_region_data.json"
    with open(json_path, "r") as fh:
        annotations = json.load(fh)

    result = {}
    for entry in annotations.values():
        boxes = []
        for region in entry["regions"]:
            attrs = region["shape_attributes"]
            left, top = attrs["x"], attrs["y"]
            boxes.append([left, top, left + attrs["width"], top + attrs["height"]])
        # Skip images that carry no rectangles at all.
        if not boxes:
            continue
        result[data_dir + os.sep + entry["filename"]] = boxes
    return result

def convert_to_json_and_save_img_for_ccpd(save_dir, image_aug, polygon_aug, name_infos, crop_width=700,crop_height=700, diff=10):
    """
    Crop a randomly positioned patch around each annotated plate and save the
    crop plus its shifted polygon annotations in VIA format.

    Parameters
    ----------
    save_dir : directory the cropped images and JSON file are written to.
    image_aug : iterable of images (H x W [x C] arrays).
    polygon_aug : iterable of per-image polygon lists (objects with .xx/.yy).
    name_infos : iterable of output file names, parallel to image_aug.
    crop_width, crop_height : size of the saved crop in pixels.
    diff : vertical safety margin kept between the crop border and the plate
        whenever the feasible vertical range is large enough.

    Writes ``via_region_data_ori.json`` into ``save_dir``.
    """
    via_dict = dict()
    for idx, (image, polygons, name) in enumerate(zip(image_aug, polygon_aug,name_infos)):
        image_path = save_dir + os.sep + name
        height,width= image.shape[:2]

        current_dict = dict()
        current_dict["filename"] = name
        regions = []

        last_x=0
        last_y=0

        # NOTE(review): when an image carries several polygons, each polygon
        # draws its own random crop origin, but only the LAST origin is used
        # to crop the saved image below — earlier polygons' shifted
        # coordinates may not match the crop. Confirm inputs are one polygon
        # per image.
        for plg in polygons:
            shape_attributes = dict()
            shape_attributes["name"] = 'polygon'
            xx =[int(x) for x in plg.xx]
            yy =[int(y) for y in plg.yy]
            center_x = int(np.mean(np.array(xx)))
            center_y = int(np.mean(np.array(yy)))
            region_w = np.max(xx) - np.min(xx)
            region_h = np.max(yy) - np.min(yy)

            # Feasible horizontal range for the crop origin so the plate
            # stays fully inside the crop and the crop inside the image.
            random_start_x = (center_x + region_w//2) - crop_width
            random_start_x = 0 if random_start_x < 0 else random_start_x
            random_end_x = center_x - region_w//2
            random_end_x = random_end_x-(random_end_x + crop_width-width) if random_end_x + crop_width > width else random_end_x
            if random_end_x<0:
                random_end_x=0

            # Feasible vertical range, analogous to the horizontal one.
            random_start_y = (center_y + region_h/2) - crop_height
            random_start_y = 0 if random_start_y < 0 else random_start_y
            random_end_y = center_y - region_h/2
            random_end_y = random_end_y - (random_end_y + crop_height-height) if random_end_y + crop_height > height else random_end_y
            if random_end_y < 0:
                random_end_y=0
            if random_start_x >  random_end_x:
                random_start_x = 0
                random_end_x =0
            # Bug fix: mirror the x-axis guard above — without it
            # np.random.randint raises ValueError whenever the vertical
            # range ends up inverted (plate taller than the crop, etc.).
            if random_start_y > random_end_y:
                random_start_y = 0
                random_end_y = 0
            last_x = np.random.randint(random_start_x, random_end_x+1)

            # Bug fix: compare against 2 * diff instead of the hard-coded 20
            # so a caller-supplied diff > 10 cannot produce an empty
            # randint range.
            if random_end_y + 1 - random_start_y > 2 * diff:
                last_y = np.random.randint(random_start_y+diff, random_end_y+1 - diff)
            else:
                last_y = np.random.randint(random_start_y, random_end_y+1)

            # Express the polygon in crop-local coordinates.
            shape_attributes["all_points_x"] = [x - last_x for x in xx ]
            shape_attributes["all_points_y"] = [y - last_y for y in yy ]
            regions.append({"shape_attributes": shape_attributes, "region_attributes":{"type":"text"}})
        current_dict["regions"] = regions
        current_dict["file_attributes"] = {}
        via_dict[name] = current_dict

        imageio.imwrite(image_path, image[last_y:last_y+crop_height, last_x:last_x+crop_width])
    with open(save_dir + os.sep + "via_region_data_ori.json", "w") as rf:
        rf.write(json.dumps(via_dict))

def convert_to_rect_json_and_save_img(save_dir, image_aug, polygon_aug):
    """Save augmented images and their bounding boxes as a VIA rect-style JSON.

    Images are written as ``imgaug_<idx>.jpg`` into *save_dir*, and their
    boxes collected into ``via_region_data_ori.json``.
    """
    via_dict = dict()
    for idx, (image, boxes) in enumerate(zip(image_aug, polygon_aug)):
        name = "imgaug_" + str(idx) + ".jpg"
        imageio.imwrite(save_dir + os.sep + name, image)

        regions = []
        for bb in boxes:
            # Normalise the corner order so x/y are the top-left corner.
            xs = sorted([int(bb.x1_int), int(bb.x2_int)])
            ys = sorted([int(bb.y1_int), int(bb.y2_int)])
            shape_attributes = {
                "name": 'rect',
                "x": xs[0],
                "y": ys[0],
                "width": xs[1] - xs[0],
                "height": ys[1] - ys[0],
            }
            regions.append({"shape_attributes": shape_attributes, "region_attributes": {"type": "rect"}})

        via_dict[name] = {"filename": name, "regions": regions, "file_attributes": {}}

    with open(save_dir + os.sep + "via_region_data_ori.json", "w") as rf:
        rf.write(json.dumps(via_dict))

def aug_image(data_dict, polygon=False):
    """
    Load the images referenced by *data_dict* and run them, together with
    their annotations, through an imgaug pipeline.

    Parameters
    ----------
    data_dict : mapping of image path -> annotation list. When ``polygon`` is
        True each annotation is a sequence of (x, y) points; otherwise it is
        a [x0, y0, x1, y1] box.
    polygon : toggles polygon vs. bounding-box augmentation.

    Returns
    -------
    (augmented images, augmented annotations, base file names) — the three
    lists are parallel.
    """
    io_images = []
    annotations = []
    name_infos = []
    for image_path in data_dict.keys():
        name_infos.append(os.path.basename(image_path))
        io_images.append(imageio.imread(image_path))

        if polygon:
            annotations.append([ia.Polygon([(x, y) for x, y in xy]) for xy in data_dict[image_path]])
        else:
            annotations.append([ia.BoundingBox(x1=boxes[0], y1=boxes[1], x2=boxes[2], y2=boxes[3])
                                for boxes in data_dict[image_path]])

    # Only a mild per-channel brightness jitter is active; the previously
    # experimented augmenters (noise, blur, crop/pad, contrast, ...) and the
    # unused `sometimes` helper were removed as dead code.
    seq = iaa.Sequential(
        [
            iaa.Add((-10, 10), per_channel=0.5),
        ],
        random_order=False
    )

    if polygon:
        image_aug, annotation_aug = seq(images=io_images, polygons=annotations)
    else:
        image_aug, annotation_aug = seq(images=io_images, bounding_boxes=annotations)
    return image_aug, annotation_aug, name_infos

    # ia.imshow(new_image)
    
# Group images by base file name and create one folder per group
def make_group(data_dir):
    """Group images by base name into per-group folders, saving each image
    cropped to its first annotated box.

    Images named ``<base>_<n>.jpg`` are grouped under ``data_dir/<base>``.
    Images without any annotated box (or absent from the VIA JSON) only get
    their group folder created; no crop is written for them.
    """
    # Build basename -> list of [x0, y0, x1, y1] boxes from the VIA JSON.
    with open(data_dir + os.sep + "via_region_data.json", "r") as rf:
        data_dict = json.loads(rf.read())
    data_new = {}
    for filename in data_dict.keys():
        xys = []
        for one_label in data_dict[filename]["regions"]:
            x0 = one_label["shape_attributes"]["x"]
            y0 = one_label["shape_attributes"]["y"]
            x1 = one_label["shape_attributes"]["width"] + x0
            y1 = one_label["shape_attributes"]["height"] + y0
            xys.append([x0, y0, x1, y1])
        data_new[data_dict[filename]["filename"]] = xys

    # Group the jpgs on disk by everything before the trailing "_<n>".
    groups = defaultdict(list)
    for p in os.listdir(data_dir):
        if p.endswith("jpg"):
            name = "_".join(p.split("_")[:-1])
            groups[data_dir + os.sep + name].append(data_dir + os.sep + p)

    for key in groups.keys():
        os.makedirs(key, exist_ok=True)
        for path in groups[key]:
            dst_path = key + os.sep + os.path.basename(path)

            # Bug fix: use .get() — images on disk that are missing from the
            # JSON previously raised KeyError here.
            boxes = data_new.get(os.path.basename(path), [])
            if len(boxes) != 0:
                image = cv2.imread(path)
                box = boxes[0]
                save_image = image[box[1]:box[3], box[0]:box[2]]
                cv2.imwrite(dst_path, save_image)


# Distribute files into sub-folders, four per folder
def mean_distribute(data_dir):
    """Move the entries of *data_dir* into numbered sub-folders of four each.

    Entry i lands in sub-folder str(i // 4); folders are created on demand.
    """
    entries = [data_dir + os.sep + p for p in os.listdir(data_dir)]

    for idx, src in enumerate(entries):
        bucket = data_dir + os.sep + str(idx // 4)
        os.makedirs(bucket, exist_ok=True)
        shutil.move(src, bucket + os.sep + os.path.basename(src))

# Remove entries with no regions, or whose image file is missing, from via.json
def clear_none_image(data_dir):
    """Rewrite the VIA JSON keeping only entries that have regions AND whose
    image file actually exists under *data_dir*."""
    json_path = data_dir + os.sep + "via_region_data.json"
    with open(json_path, "r") as fh:
        entries = json.load(fh)

    kept = {
        key: entry
        for key, entry in entries.items()
        if entry["regions"] and os.path.exists(data_dir + os.sep + entry["filename"])
    }

    with open(json_path, "w") as fh:
        fh.write(json.dumps(kept))

# Move images that have no annotations into a separate folder
def move_not_via_image(data_dir, dst_dir):
    """Move every jpg in *data_dir* that is not referenced by the VIA JSON
    into *dst_dir* (created if needed)."""
    with open(data_dir + os.sep + "via_region_data.json", "r") as fh:
        entries = json.load(fh)

    referenced = {entry["filename"] for entry in entries.values()}
    leftovers = [p for p in os.listdir(data_dir)
                 if p.endswith("jpg") and p not in referenced]

    os.makedirs(dst_dir, exist_ok=True)
    for p in leftovers:
        shutil.move(data_dir + os.sep + p, dst_dir + os.sep + p)

def ccpd_to_via(img_dir, save_dir, count=300, images_in_per_dir=5000):
    """
    Convert individual CCPD images to VIA-annotated random crops, writing
    ``images_in_per_dir`` images per output directory (``save_dir_<e>``).

    Parameters
    ----------
    img_dir : directory scanned for CCPD-named jpg files.
    save_dir : output prefix; "_<chunk index>" is appended per chunk.
    count : optional cap on images processed per chunk (None = no cap).
    images_in_per_dir : chunk size.
    """
    # Bug fix: the unqualified `get_all_filepath` is undefined in this module;
    # ccpd_to_via_stitch uses filesystem.get_all_filepath for the same job.
    # NOTE(review): that caller passes [".jpg"] while this one passes ["jpg"] —
    # confirm which form the helper expects.
    all_files = filesystem.get_all_filepath(img_dir, ["jpg"])
    np.random.shuffle(all_files)
    np.random.shuffle(all_files)

    epochs = int(np.floor(len(all_files) / images_in_per_dir))
    for e in range(epochs):

        new_save_dir = save_dir + "_" + str(e)
        os.makedirs(new_save_dir, exist_ok=True)
        data_dict = defaultdict(list)

        # Record the plate number of every image in this chunk; `with`
        # guarantees the handle is closed even if parsing raises.
        with open(new_save_dir + os.sep + "plate_number.txt", "w", encoding="utf-8") as write_label:
            for idx, p in enumerate(all_files[e * images_in_per_dir:(e + 1) * images_in_per_dir]):
                if count is not None and idx > count:
                    break
                base_name = os.path.basename(p)
                # CCPD names encode "...-<corner points>-<plate codes>-..."
                # in the 4th and 5th dash-separated fields.
                four_coordinates, plate_number = base_name.split("-")[3: 5]
                xys = [xy.split("&") for xy in four_coordinates.split("_")]
                data_dict[p].append(xys)
                write_label.write(p + " " + plate_number + "\n")

        image_aug, polygon_aug, name_infos = aug_image(data_dict, polygon=True)
        convert_to_json_and_save_img_for_ccpd(new_save_dir, image_aug, polygon_aug, name_infos)

def get_coordinates_and_plate_number(base_name):
    """Decode a CCPD file name into its four corner points and plate string.

    Returns ([[x, y], ...], plate_number); both are empty when the name has
    no dash-separated fields.
    """
    parts = base_name.split("-")
    if len(parts) == 1:
        coord_field, plate_field = "", ""
    else:
        coord_field, plate_field = parts[3: 5]

    # Corner points are "&"-joined pairs, "_"-separated.
    corner_tokens = coord_field.split("_")
    if len(corner_tokens) == 1:
        corner_tokens = []
    xys = [[int(x), int(y)] for x, y in (token.split("&") for token in corner_tokens)]

    # Plate codes index into the module-level lookup tables: first the
    # province, then the letter slot, then the alphanumeric slots.
    code_tokens = plate_field.split("_")
    if len(code_tokens) == 1:
        code_tokens = []
    number = ""
    for idx, code in enumerate(code_tokens):
        if idx == 0:
            number += provinces[int(code)]
        elif idx == 1:
            number += alphabets[int(code)]
        else:
            number += ads[int(code)]
    return xys, number

def stitch_image(epoch_idx, save_dir, all_files, images_in_per_dir=500):
    """
    Horizontally concatenate 1-3 random plate images into wider samples and
    write them out in VIA polygon format, ``images_in_per_dir`` per directory.

    Parameters
    ----------
    epoch_idx : worker index, used in the output directory names.
    save_dir : root output directory; "<epoch_idx>_<batch>" sub-dirs are created.
    all_files : CCPD-named image paths to consume.
    images_in_per_dir : number of stitched images per output directory.

    Returns any batch accumulated after the final flush (normally empty).
    """
    # Decode every file name up front into parallel path/label lists.
    image_paths, labels = [], []
    for image_path in all_files:
        base_name = os.path.basename(image_path)
        xys, number = get_coordinates_and_plate_number(base_name)
        image_paths.append(image_path)
        labels.append((xys, number))
    print("image_paths: ", len(image_paths), image_paths[0])

    new_images, new_labels = [], []
    count = 0
    epochs = 0

    stitch_nums = np.random.randint(1, 4)
    while count < len(image_paths):
        stitch_img = None
        stitch_lab = []
        consum_x = 0
        for idx, i in enumerate(range(count, count + stitch_nums)):
            try:
                img = imageio.imread(image_paths[i])
            # Bug fix: narrowed from a bare except (which also swallowed
            # KeyboardInterrupt / SystemExit).
            except Exception:
                # Bug fix: report image_paths[i] — the previous message used
                # the stale `image_path` left over from the loop above.
                print("error ", image_paths[i])
                # NOTE(review): hard-coded quarantine path; make configurable.
                shutil.move(image_paths[i], osp.join(r"F:\work\dataset\ocr\plate\error_img", osp.basename(image_paths[i])))
                break

            if idx == 0:
                stitch_img = img
                stitch_lab.append(labels[i])
            else:
                # Shift the polygon x-coordinates by the width consumed so far.
                stitch_img = np.concatenate((stitch_img, img), axis=1)
                stitch_lab.append([[[x + consum_x, y] for x, y in labels[i][0]], labels[i][1]])
            consum_x += img.shape[1]

        if stitch_img is not None:
            new_images.append(stitch_img)
            new_labels.append(stitch_lab)

        print(len(new_images))
        if len(new_images) == images_in_per_dir:
            new_save_dir = save_dir + os.sep + "{}_{}".format(epoch_idx, epochs)
            os.makedirs(new_save_dir, exist_ok=True)

            via_tool.convert_to_poloygon_json_and_save_img_2(new_save_dir,
                                        new_images,
                                        new_labels
                                        )
            new_images = []
            new_labels = []
            epochs += 1

        count += stitch_nums
        stitch_nums = np.random.randint(1, 4)
        # Flush the remainder once the next batch would overrun the list.
        # NOTE(review): image_paths[count:] are never processed when this
        # triggers with count < len(image_paths) — the tail of the shuffled
        # list is silently dropped.
        if count + stitch_nums > len(image_paths):
            new_save_dir = save_dir + os.sep + "{}_{}".format(epoch_idx, epochs)
            os.makedirs(new_save_dir, exist_ok=True)

            via_tool.convert_to_poloygon_json_and_save_img_2(new_save_dir,
                                        new_images,
                                        new_labels
                                        )
            new_images = []
            new_labels = []
            epochs += 1

            break
    return new_images, new_labels

def ccpd_to_via_stitch(img_dirs, save_dir, images_in_per_dir=500):
    """Convert CCPD images from several directories into stitched VIA
    datasets, fanning the work out over four worker processes."""
    os.makedirs(save_dir, exist_ok=True)

    # Collect and shuffle every jpg from all source directories.
    all_files = []
    for img_dir in img_dirs:
        all_files.extend(filesystem.get_all_filepath(img_dir, [".jpg"]))
    print("total image {}".format(len(all_files)))

    np.random.shuffle(all_files)
    np.random.shuffle(all_files)

    # Split the file list into contiguous chunks, one per worker.
    thread_count = 4
    chunk = int(np.ceil(len(all_files) / thread_count))
    pool = Pool(thread_count)
    for worker in range(thread_count):
        start = worker * chunk
        pool.apply_async(stitch_image,
                         args=(worker, save_dir, all_files[start:start + chunk], images_in_per_dir))

    print('Waiting for all subprocesses done...')
    pool.close()
    pool.join()




def main(data_dir):
    """Augment every rect-annotated image under *data_dir* and write the
    results (images + VIA rect JSON) to ``<data_dir>_imgaug``."""
    data_dict = read_rect_json_to_list(data_dir)

    # All images currently go through augmentation; ori_dict stays empty but
    # is kept as the pass-through bucket for unaugmented images.
    ori_dict = {}
    aug_dict = {}
    for path in data_dict.keys():
        aug_dict[path] = data_dict[path]

    image_aug, polygon_aug, name_infos = aug_image(aug_dict)

    new_save_dir = data_dir + "_imgaug"
    if not os.path.exists(new_save_dir):
        os.mkdir(new_save_dir)

    # Append the pass-through images unchanged, with their original boxes.
    for path, boxes in ori_dict.items():
        img = imageio.imread(path)
        poly = [ia.BoundingBox(x1=b[0], y1=b[1], x2=b[2], y2=b[3]) for b in boxes]
        image_aug.append(img)
        polygon_aug.append(poly)
    convert_to_rect_json_and_save_img(new_save_dir, image_aug, polygon_aug)



if __name__ == "__main__":
    # Entry point: the workflows below are toggled by (un)commenting blocks.
    # data_dir = "C:\\Users\\swls\\Desktop\\being\\2019-09-24\\label"
    # img_dir = "/media/swls/disk1/datasets_kanggle/license_plate/CCPD2019/ccpd_base" 
    # save_dir = "/media/swls/disk1/project/plate_recognition/via"
    # ccpd_to_via(img_dir, save_dir, count=None)

    # Randomly stitch license-plate images together
    # img_dirs = [
    #     r"F:\work\dataset\ocr\plate\CCPD2019\ccpd_base",
    #     r"F:\work\dataset\ocr\plate\CCPD2020\ccpd_green\train",
    #     r"F:\work\dataset\ocr\plate\CCPD2020\ccpd_green\test",
    # ]

    img_dirs = [
        r"F:\work\dataset\ocr\plate\CCPD2020\ccpd_green\val",
        r"F:\work\dataset\ocr\plate\CCPD2019"
    ]
    save_dir = r"F:\work\dataset\ocr\plate\plate_test"
    ccpd_to_via_stitch(img_dirs, save_dir)


    # data_root = "/mnt/disk1/vanlance/project/plate_recognition/via"
    # via_name='via_region_data_ori.json'
    # gen_type="plate_recognition_2"
    
    # darknet_tool.deal_many_dir_local(data_root, via_name=via_name, gen_type=gen_type)
    # darknet_tool.create_train_val_txt(data_root)
    # darknet_tool.check_darket_train_data(data_root)


    # data_ori_path = "/mnt/disk1/vanlance/project/plate_recognition/via/0_0/via_region_data_ori.json"
    # size_path =     "/mnt/disk1/vanlance/project/plate_recognition/via/0_0/via_region_data_size.json"
    # via_tool.convert_to_via(data_ori_path,size_path)

    #5 - 20
    # data_dir = r"/media/swls/disk1/project/electric_meter_box/20191107"
    # main(data_dir)

    # 3
    # data_ori_path = r"/media/swls/disk1/project/plate_recognition/via/via_region_data_ori.json"
    # via_path = r"/media/swls/disk1/project/plate_recognition/via/via_region_data_size.json"
    # convert_to_via(data_ori_path, via_path)


    # data_dir = r"/media/swls/disk1/project/electric_meter_box/20191107"
    # make_group(data_dir)


    # data_dir = r"/media/swls/disk1/project/electric_meter_box/20191107\imgaug"
    # data_dir = r"C:\Users\swls\Desktop\crop"
    # main(data_dir)



    # data_dir = r"C:\Users\swls\Desktop\crop_imgaug\imgaug"
    # mean_distribute(data_dir)

    # data_dir = "/media/swls/disk1/project/electric_meter_box/stitch_last"
    # # clear_none_image(data_dir)
    # dst_dir = "/media/swls/disk1/project/electric_meter_box/stitch_last/other"
    # move_not_via_image(data_dir, dst_dir)