from ctypes import CDLL,byref,create_string_buffer,cdll
from PIL import Image, ImageColor, ImageFont, ImageDraw, ImageFilter
import numpy as np
import base64
import cv2
import os
import shutil
import json
import io
import re
import sys
import copy
from tqdm import tqdm
from multiprocessing import Pool

import shapely
from shapely.geometry import Polygon, MultiPoint  # polygon

from tool import filesystem,imgaug_tool,utils, data_enhance, via_tool # export PYTHONPATH=$PYTHONPATH:`pwd`

class ImageCroper:
    """Crop enlarged patches around a target label (e.g. "plate") in an image.

    For every target box it extracts a randomly enlarged crop, paints random
    background over annotations that do NOT overlap the target (get_mask +
    cv2.copyTo), and records VIA-style polygon annotations — shifted into crop
    coordinates — for the annotations that DO overlap it.
    """

    def __init__(self, save_dir, cv_image, data_dict, filename, background_dir, extract_label=None):
        """
        Args:
            save_dir: directory where cropped images are written.
            cv_image: source image (as loaded by cv2.imread).
            data_dict: iterable of VIA regions, each with "region_attributes"
                ("label") and "shape_attributes" ("all_points_x"/"all_points_y").
            filename: original image file name; crop names derive from it.
            background_dir: directory of .jpg background images for in-painting.
            extract_label: if set, only boxes with this label are cropped around.
        """
        self.ori_image = copy.copy(cv_image)
        self.filename = filename
        self.save_dir = save_dir
        # Whether to extract only regions with a fixed label name
        self.extract_label = extract_label

        # [label, [[x, y], ...]] for every kept annotation
        self.all_boxs = []

        # Accumulated VIA-style output: {crop_filename: annotation dict}
        self.total_data = dict()
        self.total_idx = 0
        self._load_data(data_dict)
        self.background_images = self.load_background_image(background_dir, img_num=3)


    def _load_data(self, data_dict):
        """Fill self.all_boxs from VIA regions, dropping a width-dependent
        number of regions plus anything whose hull overlaps them."""
        if data_dict is None :
            return 0

        # One region to mark for exclusion per 720 px of image width;
        # presumably a heuristic for wide (multi-lane) images — TODO confirm.
        remove_count = self.ori_image.shape[1] // 720
        exclude_poly = []
        for region in data_dict:
            label = region["region_attributes"]["label"]
            xys = [[x,y] for x,y in zip(region["shape_attributes"]["all_points_x"], 
                                        region["shape_attributes"]["all_points_y"])]
            # # Kick license plates out of the original data # #
            if remove_count > 0:
                # # Case where the plate is missing
                if label == "plate": 
                    remove_count = 0
                else:
                    # NOTE(review): the excluded box is still appended to
                    # all_boxs here — only LATER overlapping boxes are skipped.
                    exclude_poly.append(xys)
                    self.all_boxs.append([label, xys])
                    remove_count-=1
                    continue

            # Skip any region whose convex hull intersects an excluded polygon
            is_continue = False
            poly1 = Polygon(np.array(xys)).convex_hull
            for p in exclude_poly:
                poly2 = Polygon(np.array(p)).convex_hull
                if poly1.intersects(poly2):
                    is_continue = True
                    break
            if is_continue: continue
            # # Kick license plates out of the original data # # 

            self.all_boxs.append([label, xys])

    def load_background_image(self, image_dir, img_num=3):
        """Return up to ``img_num`` randomly chosen background images."""
        image_files = filesystem.get_all_filepath(image_dir, [".jpg"])
        np.random.shuffle(image_files)
        np.random.shuffle(image_files)
        background_images = [cv2.imread(f) for f in image_files[:img_num]]
        return background_images

    def random_enlarge_crop_plate(self):
        """For each target box, write an enlarged crop with non-overlapping
        annotations replaced by background; boxes with no overlapping
        neighbour annotations are skipped entirely."""
        for count, box in enumerate(self.all_boxs):
            # print( box[0])
            if self.extract_label and box[0] != self.extract_label: continue

            poly1 = Polygon(np.array(box[1])).convex_hull

            # Every OTHER annotation overlapping the target becomes part of
            # the crop's ground truth.
            ok_boxs = []
            ok_labels = []
            for c, (l, xy) in enumerate(self.all_boxs):
                if c == count:continue 

                poly2 = Polygon(np.array(xy)).convex_hull
                if poly1.intersects(poly2):
                    ok_boxs.append(xy)
                    ok_labels.append(l)
            
            if len(ok_boxs) == 0:
                continue

            # Randomly enlarged crop window around the target's bounding rect
            plate_rect = cv2.boundingRect(np.array(box[1]))
            srect = self.enlarge_rect(plate_rect)
            if srect[3] < 10 or srect[2] < 10:
                print("srect[3] - srect[1] < 10 or srect[2] - srect[0] < 10")
                continue
            crop_image = copy.copy(self.ori_image[srect[1]:srect[1]+srect[3], srect[0]:srect[0]+srect[2]])

            # Paint random background over annotations not overlapping the target
            mask = self.get_mask(count)
            crop_mask = mask[srect[1]:srect[1]+srect[3], srect[0]:srect[0]+srect[2]]

            background = self.get_random_background(crop_mask.shape)
            cv2.copyTo(background, crop_mask, crop_image)

            # Shift the kept polygons into crop-local coordinates.
            # NOTE(review): coordinates are not clipped to the crop bounds,
            # so partially-outside polygons may carry negative values.
            ok_boxs = [[[x - srect[0],y-srect[1]] for x,y in polygon] for polygon in ok_boxs]
            self.convert_to_dict(crop_image, ok_boxs, ok_labels)


    def get_random_background(self, shape):
        """Return a random (need_h, need_w) patch from one of the loaded
        background images, resizing the source when it is too small."""
        need_h, need_w = shape

        image = self.background_images[np.random.randint(len(self.background_images))]
        max_h,max_w = image.shape[:2]

        if need_h >= max_h or need_w >= max_w:
            # +5 keeps (max - need) strictly positive for randint below
            image = cv2.resize(image, (need_w+5, need_h+5))
            max_h,max_w = image.shape[:2]
        start_h = np.random.randint(max_h - need_h)
        start_w = np.random.randint(max_w - need_w)

        return image[start_h:start_h+need_h, start_w:start_w+need_w]

    def enlarge_rect(self, plate_rect):
        """Enlarge an (x, y, w, h) rect into a randomly bigger square around
        its center, clipped to the image bounds (edge rects may end up
        non-square after clipping).
        """
        center_point = [plate_rect[0] + plate_rect[2] // 2, plate_rect[1]+plate_rect[3] // 2]
        max_w_h = np.max((plate_rect[3], plate_rect[2]))
        
        # Target side length is max_w_h * (1 + 2*ratio), ratio in [0.20, 0.79]
        ratio = np.random.randint(20,80) * 0.01
        x = center_point[0] - int(max_w_h * (0.5+ratio)) 
        if x < 0 : x = 0
        y = center_point[1] - int(max_w_h * (0.5+ratio)) 
        if y < 0 : y = 0
        
        if x + int(max_w_h * (1+ratio*2))  < self.ori_image.shape[1] :
            width = int(max_w_h * (1+ratio*2))  
        else :
            width = self.ori_image.shape[1] - x
        
        if y + int(max_w_h * (1+ratio*2)) < self.ori_image.shape[0] :
            height = int(max_w_h * (1+ratio*2)) 
        else:
            height = self.ori_image.shape[0] - y

        return [ x, y, width, height ]

    def get_mask(self, exclude_idx):
        """Build a uint8 mask (255) of every annotation whose convex hull does
        NOT intersect box ``exclude_idx`` — note the overlap filter condition.
        The excluded box always intersects itself, so it is never filled.
        """
        other_mask = np.zeros(self.ori_image.shape[:2], dtype=np.uint8)
        poly1 = Polygon(np.array(self.all_boxs[exclude_idx][1])).convex_hull
        for idx, box in enumerate(self.all_boxs):
            # if self.extract_label and box[0] != self.extract_label: continue
            poly2 = Polygon(np.array(self.all_boxs[idx][1])).convex_hull
            if poly1.intersects(poly2): continue
            # if idx == exclude_idx: continue
            other_mask = cv2.fillConvexPoly(other_mask, np.array(box[1]), [255])

        return other_mask

    def get_json_data(self):
        """Return the accumulated VIA-style annotation dict for all crops."""
        return self.total_data

    def convert_to_dict(self, cv_image, boxs, labels):
        """Write the crop to disk as ``<stem>_<idx>.<ext>`` and append its VIA
        polygon annotations to self.total_data."""
        filename = "_{}.".format(self.total_idx).join(self.filename.split("."))
        cv2.imwrite(self.save_dir + os.sep + filename, cv_image)

        one_box = dict()
        one_box["filename"] = filename
        regions = []
        for polygon, label in zip(boxs, labels):
            region = dict()
            region["region_attributes"] = {"label": label}
            region["shape_attributes"] = {"name": "polygon",
                                            "all_points_x": [int(x) for x,y in polygon],
                                            "all_points_y": [int(y) for x,y in polygon]}
            regions.append(region)
        one_box["regions"] = regions
        one_box["file_attributes"] = {}
        self.total_data[filename] = one_box
        self.total_idx+=1


def yolov3tiny_crop_plate(data_root, save_dir, bg_path="", via_name=""):
    """Crop enlarged plate patches for every image listed in the VIA file.

    Reads ``data_root/via_name`` (VIA region-data json), runs ImageCroper on
    each referenced image, writes the crops into ``save_dir`` and a merged
    ``via_region_data_ori.json`` alongside them.

    Args:
        data_root: directory containing the images and the VIA json file.
        save_dir: output directory for crops and the merged annotation json.
        bg_path: directory of background .jpg images used for in-painting.
        via_name: file name of the VIA region-data json inside ``data_root``.
    """
    region_data = data_root + os.sep + via_name
    with open(region_data) as f:
        json_data = json.load(f)

    save_dict = dict()

    for idx, p in tqdm(enumerate(json_data.keys())):

        filename = json_data[p]["filename"]
        img_path = data_root + os.sep + filename
        image = cv2.imread(img_path, 1)
        if image is None:
            # Missing/unreadable image: skip instead of crashing inside
            # ImageCroper (which would dereference image.shape).
            print("skip unreadable image:", img_path)
            continue

        image_croper = ImageCroper(save_dir, image, json_data[p]["regions"],
                        filename, bg_path, extract_label="plate")
        image_croper.random_enlarge_crop_plate()
        some_dict = image_croper.get_json_data()
        save_dict.update(some_dict)

    with open(save_dir + os.sep + "via_region_data_ori.json", "w") as wf:
        wf.write(json.dumps(save_dict))


def deal_many_dir(data_root, save_dir, background_dir, via_name=""):
    """Perspective-transform the plates of all images into training data.

    Mirrors every annotated sub-directory of ``data_root`` into ``save_dir``,
    scheduling one ``yolov3tiny_crop_plate`` task per directory on a process
    pool. Directories named ``0_0_0_3_via`` and already-existing outputs are
    skipped.
    """
    via_files = filesystem.get_all_filepath(data_root, [via_name])
    # np.random.shuffle(via_files)
    # np.random.shuffle(via_files)

    process_count = 12  # Pool spawns processes, not threads
    pool = Pool(process_count)
    total = len(via_files)
    try:
        for idx, via_path in enumerate(via_files):

            path_dir = os.path.dirname(via_path)
            if os.path.basename(path_dir) == "0_0_0_3_via":
                print("continue 0_0_0_3_via...")
                continue

            new_save_dir = path_dir.replace(data_root, save_dir)

            print("{}/{}".format(idx, total))
            if os.path.exists(new_save_dir):
                print("exist: ", new_save_dir)
                continue

            os.makedirs(new_save_dir)
            # error_callback surfaces worker exceptions that apply_async
            # would otherwise swallow silently.
            pool.apply_async(yolov3tiny_crop_plate,
                             args=(path_dir, new_save_dir, background_dir, via_name),
                             error_callback=print)
    finally:
        # Always release the pool, even if scheduling raises mid-loop;
        # close() + join() waits for all queued tasks to finish.
        pool.close()
        pool.join()


def merge_many_dataset_to_crnn(data_roots, probs, via_name="labels.txt"):
    """Merge several CRNN datasets into the last one by probabilistic sampling.

    For every sub-directory of each source dataset, each label line (and its
    referenced image) is copied into the matching sub-directory of
    ``data_roots[-1]`` with probability ``probs``. Label lines are APPENDED to
    the destination's existing label file.

    Args:
        data_roots: dataset root directories; the LAST entry is the destination.
        probs: per-sample keep probability in [0, 1].
        via_name: label file name inside each sub-directory; each line is
            "<image_name> <label...>".
    """
    dst_save_dir = data_roots[-1]

    for data_set in data_roots[:-1]:

        for data_dir_name in os.listdir(data_set):
            data_dir = data_set + os.sep + data_dir_name

            dst_save_data_dir = dst_save_dir + os.sep + data_dir_name
            os.makedirs(dst_save_data_dir, exist_ok=True)

            with open(dst_save_data_dir + os.sep + via_name, "a") as wf, \
                 open(data_dir + os.sep + via_name, "r") as rf:
                # Iterate the file lazily instead of readlines(): avoids
                # buffering the whole label file in memory.
                for line in rf:
                    if np.random.random() < probs:
                        src_path = data_dir + os.sep + line.split(" ")[0]
                        dst_path = dst_save_data_dir + os.sep + os.path.basename(src_path)
                        shutil.copy(src_path, dst_path)
                        wf.write(line)


if __name__ == "__main__":
    # NOTE(review): the original `if` body contained only comments, which is
    # an IndentationError at parse time — the file could not even be imported.
    # `pass` makes the module loadable; uncomment a recipe below to run it.
    pass

    # # 1  Detect plates in images, match them to annotation info, and save
    # data_root= '/home/swls/work_dir/github/paddle/train_data/det/plate/images' 
    # save_dir = "/home/swls/work_dir/github/paddle/train_data/ocr_det/plate/images"
    # background_dir = "/home/swls/work_dir/ocr/code/yolo_train/yolo_plate_np/ccpd_np"
    # via_name = 'via_region_data_ori.json'
    # deal_many_dir(data_root, save_dir, background_dir, via_name=via_name)

    # data_root =  "/home/swls/work_dir/github/paddle/train_data/ocr_det/plate"
    # via_name='via_region_data_ori.json'
    # gen_type = "db_plate"
    # ratio = 0.95
    # via_tool.via_to_db(data_root, via_name, gen_type, ratio)

    # Remove empty folders
    # data_root = "/home/swls/work_dir/ocr/code/yolo_train/yolo_plate_text/images"
    # via_name = 'via_region_data_ori.json'
    # filesystem.clear_dir_by_no_via_json(data_root, via_name=via_name)


    # Merge several datasets into the last one with a given probability
    # data_roots = [
    #     "/home/swls/work_dir/ocr/code/syn/syn_plate_20k",
    #     "/home/swls/work_dir/ocr/code/syn/syn_plate_200k",
    # ]
    # probs = 0.7
    # merge_many_dataset_to_crnn(data_roots, probs)

"""
cd /home/swls/work_dir/git/python_script && conda activate tf3 && export PYTHONPATH=$PYTHONPATH:`pwd` && python sunjie/plate_recognition/gen_crop_plate.py

"""