from ctypes import CDLL,byref,create_string_buffer
from ctypes import cdll
from collections import defaultdict
from genericpath import exists
import os, shutil, io, sys
import os.path as osp
import json, time
import numpy as np
import cv2
import copy
import base64, requests
from multiprocessing import Pool
from tqdm import tqdm
from PIL import Image
from tool import filesystem, via_tool, darknet_tool, opencv_tool # export PYTHONPATH=$PYTHONPATH:`pwd`


class MyEncoder(json.JSONEncoder):
    """json.JSONEncoder that also serializes numpy scalars/arrays and bytes."""

    def default(self, obj):
        # Convert numpy scalar types to their native Python equivalents.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # Arrays become (nested) lists.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Raw bytes are assumed to be UTF-8 text.
        if isinstance(obj, bytes):
            return obj.decode("utf-8")
        # Anything else: defer to the base class (raises TypeError).
        return super().default(obj)

# Find identical images (by exact byte size).
def find_image_by_size(img_path, find_dir, save_dir):
    """Copy every image under *find_dir* whose file size equals *img_path*'s.

    Matches are written into *save_dir* with their path relative to
    *find_dir* flattened into the filename ("/" replaced by "_").
    """
    os.makedirs(save_dir, exist_ok=True)  # fix: target dir may not exist yet
    img_size = osp.getsize(img_path)

    for f in filesystem.get_all_filepath(find_dir, [".jpg", ".png"]):
        # fix: dropped the noisy per-file size print; only report matches.
        if osp.getsize(f) != img_size:
            continue
        save_path = save_dir + os.sep + f.replace(find_dir, "").replace("/", "_")
        shutil.copy(f, save_path)
        print(save_path)



def find_image_by_mean(img_path, find_dir, save_dir):
    """Search for one image: copy near-duplicates of *img_path* by mean pixel value.

    Any image under *find_dir* whose global mean differs from the query's
    by less than 1.0 is copied into *save_dir* (relative path flattened
    into the filename).
    """
    os.makedirs(save_dir, exist_ok=True)  # fix: target dir may not exist yet
    mean = np.mean(cv2.imread(img_path))

    for f in tqdm(filesystem.get_all_filepath(find_dir, [".jpg", ".png"])):
        img = cv2.imread(f)
        if img is None:  # fix: skip unreadable/corrupt files instead of crashing
            continue
        if abs(mean - np.mean(img)) < 1.:
            save_path = save_dir + os.sep + f.replace(find_dir, "").replace("/", "_")
            shutil.copy(f, save_path)
            print(f)

def find_similiar_img(data_dir, save_dir):
    """Find groups of similar images and copy each group into *save_dir*.

    Images are bucketed by the first 8 characters of their stringified
    mean pixel value; buckets with more than one member are copied out as
    "<group>_<stem>_<idx>.<ext>" for side-by-side inspection.
    """
    os.makedirs(save_dir, exist_ok=True)
    data_dict = defaultdict(list)

    for f in tqdm(filesystem.get_all_filepath(data_dir, [".jpg", ".png"])):
        img = cv2.imread(f)
        if img is None:  # fix: skip unreadable/corrupt files instead of crashing
            continue
        data_dict[str(np.mean(img))[:8]].append(f)

    count = 0
    for k, v in data_dict.items():
        print(len(v))
        if len(v) > 1:
            for idx, f in enumerate(v):
                # e.g. "3_frame_0.jpg": group id, original stem, index in group.
                sp = osp.join(save_dir, str(count) + "_" + "_{}".format(idx).join(osp.basename(f).split(".")))
                shutil.copy(f, sp)
            count += 1

def extract_truck_pickup(data_dir, save_dir, via_name="via_region_data.json"):
    """Copy images (and their VIA annotations) whose regions include a
    "truck" or "pickup" label, one output directory per source VIA file.
    """
    vis_files = filesystem.get_all_filepath(data_dir, [via_name])

    retain_key = ["truck", "pickup"]

    for vf in vis_files:
        with open(vf) as f:
            train_labels = json.loads(f.read())

        save_dict = dict()
        # Create the per-source save directory, named after the VIA file's parent.
        save_root = osp.join(save_dir, osp.basename(osp.dirname(vf)))
        os.makedirs(save_root, exist_ok=True)

        for k, one_data in train_labels.items():
            file_name = one_data["filename"]
            src_path = osp.join(osp.dirname(vf), file_name)

            # Skip annotation entries whose image is missing on disk.
            if not osp.exists(src_path):
                continue
            wanted = any(
                region["region_attributes"]["label"] in retain_key
                for region in one_data["regions"]
            )
            if wanted:
                shutil.copy(src_path, osp.join(save_root, file_name))
                save_dict[k] = one_data

        # Write the filtered VIA annotations next to the copied images.
        with open(save_root + os.sep + via_name, "w") as wf:
            wf.write(json.dumps(save_dict))

class Rec:
    """Thin ctypes wrapper around the paddle text-recognition shared library."""

    # Default install locations; pass arguments to init() on other machines.
    DEFAULT_DLL_PATH = '/home/xc/work/code/paddle/pdmodel/build/src/libpdmodel_manager.so'
    DEFAULT_MODEL_PATH = "/home/xc/work/code/paddle/pdmodel/model"

    def init(self, dll_path=None, model_path=None):
        """Load the shared library and initialise the model.

        dll_path / model_path: optional overrides for the hard-coded
        defaults (generalized; previous behaviour unchanged when omitted).
        Returns the library's integer status code.
        """
        self.optimize_recog_dll = cdll.LoadLibrary(dll_path or self.DEFAULT_DLL_PATH)

        model_path_buffer = create_string_buffer(
            (model_path or self.DEFAULT_MODEL_PATH).encode('utf-8'), 65536)

        out = self.optimize_recog_dll.init_model_py(byref(model_path_buffer))
        print("init_model out: ", out)
        return out

    def text_rec_lite_py(self, imgData):
        """Run text recognition on a base64-encoded jpeg.

        imgData: bytes, base64 jpeg payload.
        Returns (status_code, parsed_result_dict).
        """
        # Buffer sizes match the C side's expectations (large fixed buffers).
        img_dir_buffer0 = create_string_buffer(imgData, 65536*500)
        out_info = create_string_buffer(65536*560)
        out_2 = self.optimize_recog_dll.text_rec_lite_py(byref(img_dir_buffer0), byref(out_info))
        return out_2, json.loads(out_info.value.decode('utf8'))

# Sort images by the timestamp rendered inside the frame.
def sort_img_by_time(data_dir, via_name="via_region_data.json"):
    """Rename/move images into *data_dir*_t ordered by the on-screen clock.

    The clock overlay is cropped, binarised and OCR'd to HHMMSS; images are
    grouped by second-of-day, ordered by their numeric filename within a
    group, and renamed "<group>_<i>.jpg". The VIA annotation file is
    re-keyed to match the new filenames and written into the output dir.
    """
    rec = Rec()
    ret = rec.init()
    print("ret: ", ret)

    save_dir = data_dir + "_t"
    os.makedirs(save_dir, exist_ok=True)

    img_files = filesystem.get_all_filepath(data_dir, [".jpg"], False)
    img_time_dict = defaultdict(list)
    for f in img_files:
        img = cv2.imread(f)
        # Crop the clock overlay (pixel coords tuned for this footage).
        img = img[108:178, 203:902, :]
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, binary = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
        img = cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)

        # Re-encode the crop as a base64 jpeg for the recognizer.
        buffer = io.BytesIO()
        rgb_img = img[:, :, ::-1]
        pil_image = Image.fromarray(rgb_img)
        pil_image.save(buffer, format="jpeg")
        encoded = base64.b64encode(buffer.getvalue())
        ret, data = rec.text_rec_lite_py(encoded)
        if ret != 0: continue
        # Strip separators (incl. fullwidth variants) and keep the last 6 digits.
        time_text = data["text"].replace(":", "").replace("：", "").replace("；", "").replace("，", "")[-6:]
        try:
            time_int = int(time_text[0:2]) * 3600 + int(time_text[2:4]) * 60 + int(time_text[4:6])
        except ValueError:  # fix: bare except also swallowed KeyboardInterrupt etc.
            continue
        img_time_dict[time_int].append(f)

    with open(data_dir + os.sep + via_name, "r") as rf:
        data_dict = json.loads(rf.read())

    img_time_list = sorted(img_time_dict.items(), key=lambda x: x[0])

    for idx, (timeline, files) in enumerate(img_time_list):
        # Within the same second, order by the original numeric filename stem.
        file_list = sorted([[f, int(osp.basename(f).split(".")[0])] for f in files],
                           key=lambda x: x[1])

        for i, (f, stem) in enumerate(file_list):
            ori_name = osp.basename(f)
            ori_size = str(osp.getsize(f))
            new_name = "{}_{}.jpg".format(idx, i)
            shutil.move(f, osp.join(save_dir, new_name))

            # VIA keys are "<filename><filesize>"; re-key the entry under the
            # new name (the size is unchanged by the move).
            if data_dict.get(ori_name + ori_size, None):
                item_data = copy.copy(data_dict[ori_name + ori_size])
                item_data["filename"] = new_name
                data_dict[new_name + ori_size] = item_data

    with open(save_dir + os.sep + via_name, "w") as wf:
        wf.write(json.dumps(data_dict))

# Sort images by the date rendered inside the frame.
def sort_img_by_date(data_dir, save_dir, via_name="via_region_data.json"):
    """Rename/move images into *save_dir* ordered by the on-screen date.

    The date overlay is cropped and OCR'd to YYYYMMDD; images are ranked by
    an approximate day index (month * 30 + day), then by numeric filename
    stem within the same day, and renamed "<group>_<i>.jpg". The VIA
    annotation file is re-keyed to match and written into *save_dir*.
    """
    rec = Rec()
    ret = rec.init()
    print("ret: ", ret)

    os.makedirs(save_dir, exist_ok=True)  # fix: output dir was never created

    img_files = filesystem.get_all_filepath(data_dir, [".jpg"], False)
    img_time_dict = defaultdict(list)
    for f in img_files:
        img = cv2.imread(f)
        # Crop the date overlay (pixel coords tuned for this footage).
        img = img[97:185, 203:518, :]

        # Re-encode the crop as a base64 jpeg for the recognizer.
        buffer = io.BytesIO()
        rgb_img = img[:, :, ::-1]
        pil_image = Image.fromarray(rgb_img)
        pil_image.save(buffer, format="jpeg")
        encoded = base64.b64encode(buffer.getvalue())
        ret, data = rec.text_rec_lite_py(encoded)
        if ret != 0: continue
        time_text = data["text"].replace("-", "")
        if len(time_text) != 8:  # expect YYYYMMDD after stripping dashes
            print("ERROR: ", data["text"])
            continue
        try:
            # month * 30 + day: crude but monotonic enough for ordering.
            time_int = int(time_text[4:6]) * 30 + int(time_text[6:8])
        except ValueError:  # fix: bare except also swallowed KeyboardInterrupt etc.
            continue
        img_time_dict[time_int].append(f)

    with open(data_dir + os.sep + via_name, "r") as rf:
        data_dict = json.loads(rf.read())

    img_time_list = sorted(img_time_dict.items(), key=lambda x: x[0])

    for idx, (timeline, files) in enumerate(img_time_list):
        # The stem's last character is dropped before the numeric sort.
        # NOTE(review): presumably a non-numeric suffix — confirm with the data.
        file_list = sorted([[f, int(osp.basename(f).split(".")[0][:-1])] for f in files],
                           key=lambda x: x[1])

        for i, (f, stem) in enumerate(file_list):
            ori_name = osp.basename(f)
            ori_size = str(osp.getsize(f))
            new_name = "{}_{}.jpg".format(idx, i)
            shutil.move(f, osp.join(save_dir, new_name))

            # VIA keys are "<filename><filesize>"; re-key the entry under the
            # new name (the size is unchanged by the move).
            if data_dict.get(ori_name + ori_size, None):
                item_data = copy.copy(data_dict[ori_name + ori_size])
                item_data["filename"] = new_name
                data_dict[new_name + ori_size] = item_data

    with open(save_dir + os.sep + via_name, "w") as wf:
        wf.write(json.dumps(data_dict))

# Crop images by a polygon.
def crop_by_poly(data_dir, xs, ys):
    """Crop every .jpg under *data_dir* to the bounding box of (xs, ys),
    blacking out pixels outside the polygon; results go to *data_dir*_crop.
    """
    save_dir = data_dir + "_crop"
    os.makedirs(save_dir, exist_ok=True)

    files = filesystem.get_all_filepath(data_dir, [".jpg"])
    minx, maxx = min(xs), max(xs)
    miny, maxy = min(ys), max(ys)
    # Shift polygon vertices so they are relative to the bounding-box crop.
    poly = [[x - minx, y - miny] for x, y in zip(xs, ys)]
    for f in files:
        cropped = cv2.imread(f)[miny:maxy, minx:maxx]
        mask = np.zeros(cropped.shape[:2], dtype=np.uint8)
        # NOTE(review): fillConvexPoly assumes a convex polygon — some point
        # sets used in __main__ look concave; confirm fillPoly isn't needed.
        cv2.fillConvexPoly(mask, np.array(poly), [255])
        result = np.zeros(cropped.shape, dtype=np.uint8)
        cv2.copyTo(cropped, mask, result)
        cv2.imwrite(osp.join(save_dir, osp.basename(f)), result)

def post_rec(img_b64, timeout=None):
    """POST a base64 jpeg to the recognition service and return the result.

    img_b64: base64-encoded jpeg bytes/str.
    timeout: optional seconds passed to requests.post; None waits forever
             (matching the previous behaviour). Pass a value to avoid the
             call hanging indefinitely on an unresponsive server.
    Returns the parsed response dict, or None when the service reports a
    non-zero code.
    """
    data = {
        "img_b64": img_b64,
        # Known (mdl_idx, algo_type, mdl_type) combinations:
        # 1 VihicleDetection det
        # 3 SlopeDetection   seg
        # 1 FireWarning      det
        # 1 TextRec          text_rec
        # 1 PedestrianDetection det
        "mdl_idx": 1,
        "algo_type": "VihicleDetection",
        "mdl_type": "det"
    }
    # url = "http://192.168.10.163:9999/api/rec"
    url = "http://192.168.200.125:9999/api/rec"
    headers = {'Content-Type': 'application/json;charset=UTF-8'}
    # MyEncoder handles the base64 bytes payload.
    string = json.dumps(data, cls=MyEncoder)
    r = requests.post(url, data=string, headers=headers, timeout=timeout)
    rec_data = r.json()
    if rec_data["code"] != 0:
        return None
    return rec_data

def http_rec_to_via(file_path, via_name="via_region_data.json"):
    """Run the HTTP recognizer on one image and build a VIA-format record.

    Returns a dict with filename/size/file_attributes/regions, or None
    when *file_path* does not exist. The regions list is empty when the
    service call fails.
    """
    if not osp.exists(file_path): return None

    one_data = {
        "filename": osp.basename(file_path),
        "size": osp.getsize(file_path),
        "file_attributes": {},
    }

    # Encode the image as a base64 jpeg (BGR -> RGB for PIL) for the service.
    img = cv2.imread(file_path)
    pil_image = Image.fromarray(img[:, :, ::-1])
    buffer = io.BytesIO()
    pil_image.save(buffer, format="jpeg")
    rec_data = post_rec(base64.b64encode(buffer.getvalue()))

    regions = []
    if rec_data is not None and rec_data["code"] == 0:
        seen = {}
        for item in rec_data["items"]:
            x, y, w, h, label = item["x"], item["y"], item["w"], item["h"], item["category"]
            # Drop duplicate boxes that share the same top-left corner.
            if seen.get("{}x{}".format(x, y), None): continue
            seen["{}x{}".format(x, y)] = 1

            regions.append({
                "shape_attributes": {
                    "name": "rect",
                    "x": x,
                    "y": y,
                    "width": w,
                    "height": h,
                },
                "region_attributes": {
                    "label": label,
                },
            })

    one_data["regions"] = regions
    return one_data

def distribute(sub_dir, data_dict, start_cnt=0, per_img_in_dir=300, via_name="via_region_data.rec.json"):
    """Spread the files in *data_dict* round-robin into numbered sub-dirs.

    data_dict: maps file path -> VIA record (must contain a "size" key).
    Creates ceil(len/per_img_in_dir) directories named start_cnt,
    start_cnt+1, ..., moves the files into them round-robin, and writes
    one VIA json (keyed "<filename><size>") per directory.
    """
    total = len(data_dict)  # fix: len() instead of calling __len__() directly
    dir_cnt = total // per_img_in_dir
    if total % per_img_in_dir != 0:
        dir_cnt += 1

    save_dirs = [osp.join(sub_dir, str(i + start_cnt)) for i in range(dir_cnt)]
    save_dicts = [{} for _ in range(dir_cnt)]
    for save_dir in save_dirs:
        os.makedirs(save_dir, exist_ok=True)

    for idx, (f, v) in enumerate(data_dict.items()):
        choose = idx % dir_cnt
        shutil.move(f, osp.join(save_dirs[choose], osp.basename(f)))
        # Keys follow the VIA convention "<filename><filesize>".
        save_dicts[choose][osp.basename(f) + str(v["size"])] = v

    for idx, save_dir in enumerate(save_dirs):
        with open(osp.join(save_dir, via_name), "w") as rf:
            rf.write(json.dumps(save_dicts[idx]))


def auto_split_data(data_dir, ratio=1/20):
    """
    Auto-extract archives and split the data into day/night image sets.

    For every .tar under data_dir: extract into a sibling "day" directory,
    delete known junk/annotation frames and near-duplicate images, then
    move frames captured between 18:00 and 08:00 (UTC+8) into "night".

    NOTE(review): the *ratio* parameter is unused in this body — confirm
    whether it is dead or consumed by a caller before removing.
    """
    targz_files = filesystem.get_all_filepath(data_dir, [".tar"])

    for tarf in targz_files:
        cur_dir = osp.join(osp.dirname(tarf), "day")
        os.makedirs(cur_dir, exist_ok=True)
        # "r:" mode: plain uncompressed tar.
        filesystem.extract_targz(tarf, cur_dir, "r:")

        filesystem.remove_file_by_name(cur_dir, ["big_truck.jpg", "car_stop.jpg", "move_slow.jpg", "converse_move.jpg"], ["_train.jpg"])
        # remove replicate files
        opencv_tool.remove_image_by_mean(cur_dir, 1e4)

        total_imgs = filesystem.get_all_filepath(cur_dir, [".jpg"])

        # move 18:00 ~ 08:00 to night
        night_dir = osp.join(osp.dirname(tarf), "night")
        os.makedirs(night_dir, exist_ok=True)
        for path in total_imgs:
            # Filename stem before "_" is a millisecond epoch timestamp.
            time_point = int(osp.basename(path).split("_")[0]) // 1000
            # +8h shifts gmtime into UTC+8 local time.
            time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime(time_point + 60*60*8))
            hour = int(time_str.split("-")[3])
            if hour >= 8 and hour < 18: continue
            shutil.move(path, osp.join(night_dir, osp.basename(path)))

        # # os.remove(tarf)


def merge_img_and_distribute3(data_dir, per_img_in_dir=300, via_name="via_region_data.rec.json"):
    """Merge all images under each leaf directory and redistribute them.

    "night" dirs: run recognition on every image and write one VIA json.
    "day" dirs: run recognition, split results into multi-region (>= 2
    boxes) and the rest, distribute each set into numbered sub-dirs via
    distribute(), then remove the original "day" directory.
    """
    sub_dirs = filesystem.get_last_dir(data_dir, -1)

    for sub_dir in sub_dirs:
        img_files = filesystem.get_all_filepath(sub_dir, [".jpg"])
        if len(img_files) == 0:
            print("continue: ", sub_dir)  # fix: typo "contniue"
            continue

        print(sub_dir)
        if sub_dir.endswith("night"):
            save_dicts = {}
            for f in tqdm(img_files):
                one_data = http_rec_to_via(f)
                if one_data is None:  # fix: missing None guard (day branch has it)
                    continue
                key = one_data["filename"] + str(one_data["size"])
                save_dicts[key] = one_data

            with open(osp.join(sub_dir, via_name), "w") as rf:
                rf.write(json.dumps(save_dicts))

        elif sub_dir.endswith("day"):
            rec_dict = {}
            rec_dict_two = {}
            for f in tqdm(img_files):
                one_data = http_rec_to_via(f)
                if one_data is None: continue
                if len(one_data["regions"]) >= 2:
                    rec_dict_two[f] = one_data
                else:
                    rec_dict[f] = one_data

            # Multi-region images start numbering at 0, the rest at 1000, so
            # the two sets never share an output directory.
            distribute(osp.dirname(sub_dir), rec_dict_two, 0, per_img_in_dir, via_name)
            distribute(osp.dirname(sub_dir), rec_dict, 1000, per_img_in_dir, via_name)
            shutil.rmtree(sub_dir)

if __name__ == "__main__":
    # Scratchpad entry point: each commented block below is a past run of
    # one of the utilities above with its concrete paths/parameters kept
    # for reference. Only the final darknet_tool pipeline is active.

    # img_path = "/home/xc/work/data/car/data/image/04-09/Screenshot_20210409_170609_uni.UNIF0607A1.jpg"
    # find_dir = "/home/xc/work/code/paddle/train_data/det/car/images/real"
    # save_dir = "/home/xc/work/code/paddle/train_data/det/car/images/test"
    # # find_image_by_size(img_path, find_dir, save_dir)
    # find_image_by_mean(img_path, find_dir, save_dir)

    # data_dir = "/home/xc/work/code/paddle/train_data/det/car/images/real"
    # save_dir = "/home/xc/work/code/paddle/train_data/det/car/images/test"
    # find_similiar_img(data_dir, save_dir)

    # Extract the truck/pickup classes for separate labelling.
    # data_dir = "/home/xc/work/code/paddle/train_data/det/car/images/real"
    # save_dir = "/home/xc/work/code/paddle/train_data/det/car/images/copy"
    # extract_truck_pickup(data_dir, save_dir)

    # data_dir = sys.argv[1]
    # sort_img_by_time(data_dir)

    # data_dir = sys.argv[1]
    # # small
    # xs = [138,150,163,174,187,208,252,296,316,308,286,263,242,219,163,156,138]
    # ys = [194,214,231,249,266,267,260,249,246,230,210,193,183,172,183,186,194]
    # big
    # xs = [403,425,463,503,556,581,677,795,890,943,905,819,742,667,581,475]
    # ys = [581,617,658,716,790,805,797,775,759,737,672,608,552,516,519,556]

    # xs = [441,468,508,561,606,710,884,967,988,955,885,819,746,692,629,441]
    # ys = [636,693,742,816,859,845,810,780,748,703,649,592,569,546,548,636]

    # xs = [583,593,626,663,707,746,841,991,1093,1089,1026,962,916,855,795,583]
    # ys = [578,622,663,709,757,809,793,775,746,697,647,592,558,529,504,578]
    # crop_by_poly(data_dir, xs, ys)



    # data_dir = "/home/xc/work/code/paddle/train_data/det/car/images/2021-06-11/1"
    # save_dir = "/home/xc/work/code/paddle/train_data/det/car/images/2021-06-11/1_t"
    # sort_img_by_date(data_dir, save_dir)

    # # 1. Extract archives and distribute the images.
    # data_dir = sys.argv[1]
    # auto_split_data(data_dir)

    # # 2. Run recognition over the data.
    # data_dir = sys.argv[1]
    # merge_img_and_distribute3(data_dir)

    # data_root = "/home/xc/work/code/paddle/train_data/det/car"
    # via_name="via_region_data.merge.json" 
    # gen_type="car"
    # darknet_tool.deal_many_dir_local(data_root, via_name, gen_type)
    # darknet_tool.create_train_val_txt(data_root)
    # darknet_tool.check_darket_train_data(data_root)



    # data_root = r"D:\work\dataset\det\dust"
    # save_dir = r"D:\work\dataset\det\dust.2"
    # via_name=".json" 
    # gen_type="dust"
    # # darknet_tool.deal_many_dir(data_root, save_dir, via_name, True, gen_type)
    # darknet_tool.create_train_val_txt(save_dir)
    # darknet_tool.check_darket_train_data(save_dir)

    # data_root = r"F:\work\dataset\det\boat_smoke"
    # save_dir = r"F:\work\dataset\det\boat_smoke\dataset"
    # via_name="via_region_data.json" 
    # gen_type="boat_smoke"
    # # gen_type="ringelman"
    # darknet_tool.deal_many_dir(data_root, save_dir, via_name, True, gen_type)
    # darknet_tool.create_train_val_txt(save_dir)
    # darknet_tool.check_darket_train_data(save_dir)

    # Active pipeline: convert the "dust" VIA dataset to darknet format,
    # generate train/val lists, and sanity-check the result.
    data_root = r"F:\work\dataset\det\dust"
    save_dir = r"F:\work\dataset\det\dust\dataset"
    via_name="via_region_data.json" 
    gen_type="dust"
    darknet_tool.deal_many_dir(data_root, save_dir, via_name, True, gen_type)
    darknet_tool.create_train_val_txt(save_dir)
    darknet_tool.check_darket_train_data(save_dir)
    pass
