import sys
sys.path.append('..') #表示导入当前文件的上层目录到搜索路径中
sys.path.append('/home/zengxh/workspace/python_developer_tools') # 绝对路径
import argparse
import os
import shutil
import random

import copy
import numpy as np
import cv2
import multiprocessing
import yaml
from imutils import paths
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from PIL import Image, ImageOps
from dataset.constants import NORMALIZATION_WIDTH, NORMALIZATION_HEIGHT, RANDOM_SEED, CLASSES_MAP
from python_developer_tools.cv.datasets.datasets_utils import resize_image, letterbox
from python_developer_tools.files.common import mkdir, get_filename_suf_pix
from python_developer_tools.files.json_utils import read_json_file, save_json_file
from python_developer_tools.python.threadings.multiprocessing_utils import parmap
from python_developer_tools.python.list_utils import find_all_index
from python_developer_tools.cv.utils.PIL_utils import PIL2cv2
from python_developer_tools.cv.utils.torch_utils import init_seeds
from python_developer_tools.cv.datasets.classes.APR import APRecombination

def createDatasets(datasets, dirname):
    """Copy each image in `datasets`, plus its sibling labelme ``.json`` file,
    into the directory configured under `dirname`.

    Relies on the module-level global `data_dict_tmp` (bound inside the
    ``__main__`` loop) to resolve the destination directory.

    Args:
        datasets: iterable of ``.jpg`` image paths.
        dirname: key into `data_dict_tmp` whose value is the destination dir.
    """
    # Original wrapped this in a single-argument os.path.join, which is a no-op.
    dataDir = data_dict_tmp[dirname]
    # exist_ok avoids the exists()/makedirs() race of the original version.
    os.makedirs(dataDir, exist_ok=True)
    for datapath in datasets:
        # Copy the annotation first, then the image itself.
        shutil.copy(datapath.replace(".jpg", ".json"), dataDir)
        shutil.copy(datapath, dataDir)

def get_origin_image_points(imagePath):
    """Load an image together with its labelme annotations.

    Reads the ``.json`` file sitting next to the image and returns the BGR
    image, the raw shape points as a numpy array, and the class ids mapped
    through ``CLASSES_MAP``.
    """
    image = cv2.imread(imagePath)
    annotation = read_json_file(imagePath.replace(".jpg", ".json"))
    shapes = annotation["shapes"]
    points_list = [shape["points"] for shape in shapes]
    class_ids = [CLASSES_MAP[shape["label"]] for shape in shapes]
    return image, np.array(points_list), class_ids

def get_json_pts(json_path):
    """Return the list of per-shape point sets from a labelme json file.

    When the first two shapes are labelled ``('min', 'max')`` the list is
    reversed, so callers always receive the 'max' edge before the 'min' edge.

    Args:
        json_path: path to the labelme annotation file.

    Returns:
        List of point lists, one per shape (possibly reversed, see above).
    """
    json_cont = read_json_file(json_path)
    edges_pts = []
    labels = []
    for shapes in json_cont["shapes"]:
        points = shapes["points"]
        label = shapes["label"]
        edges_pts.append(points)
        labels.append(label)

    # Guard against files with fewer than two shapes: the original indexing
    # raised IndexError on an empty or single-shape annotation.
    if len(labels) >= 2 and labels[0] == 'min' and labels[1] == 'max':
        edges_pts = edges_pts[::-1]

    return edges_pts

def label_transpose_1(label_o, w0, h0):
    """Remap point labels for an image rotated 90° clockwise (tb datasets).

    For each point: new_y = old_x, new_x = w0 - old_y, where `w0` is the
    width of the rotated image. The input array is left untouched; a new
    array is returned. `h0` is accepted for signature symmetry but unused.
    """
    rotated = label_o.copy()
    rotated[..., 1] = label_o[..., 0]  # y' <- x
    rotated[..., 0] = w0 - label_o[..., 1]  # x' <- width - y
    return rotated

def labels_convert_train(label, w0, h0, w1, h1, w2, h2, padw, padh):
    """Map point coordinates through the resize -> letterbox pipeline, in place.

    Each axis is scaled from the original size (w0/h0) through the resized
    size (w1/h1) to the letterboxed size (w2/h2), then shifted by the padding.
    The input array is modified in place and also returned.
    """
    # (source dim, intermediate dim, target dim, padding) per axis: 0 = x, 1 = y.
    per_axis = ((w0, w1, w2, padw), (h0, h1, h2, padh))
    for axis, (src_dim, mid_dim, dst_dim, pad) in enumerate(per_axis):
        # Arithmetic kept in the original evaluation order for float-identical results.
        label[:, :, axis] = ((label[:, :, axis] * mid_dim / src_dim) * dst_dim) / mid_dim + pad
    return label

def yellowToBlue(img_path):
    """Convert a 'yellow' image into a 'blue'-looking one via auto-contrast.

    Opens the file with PIL, stretches its histogram (clipping 3% of pixels
    at each end), and returns the result converted to an OpenCV/numpy image.
    """
    pil_img = Image.open(img_path)
    stretched = ImageOps.autocontrast(pil_img, cutoff=3)
    return PIL2cv2(stretched)

def convert_img(img,labels_tmp,classes,filename):
    """Normalize one image and its point labels, write the image to disk.

    Uses module-level globals set in ``__main__``: `key` (current dataset key,
    e.g. "tb") and `images_dir` (output directory). For "tb" datasets the
    image is first rotated 90° clockwise and the labels remapped to match.
    The image is then resized and letterboxed to
    NORMALIZATION_WIDTH x NORMALIZATION_HEIGHT, and the labels are pushed
    through the same geometric transform.

    Args:
        img: BGR image (numpy array).
        labels_tmp: point labels; indexing below implies shape
            (num_lines, 2, 2) as (line, endpoint, xy) — TODO confirm against
            get_origin_image_points / the annotation format.
        classes: per-line class ids (list).
        filename: output image file name, written under `images_dir`.

    Returns:
        dict with flattened line endpoints, classes and the output size,
        ready to be serialized into the train/valid json files.
    """
    if key == "tb":
        # Rotate 90° clockwise: transpose followed by a horizontal flip.
        img = cv2.transpose(img)
        img = cv2.flip(img, 1)
        h0, w0 = img.shape[:2]  # rotated hw (w0 here is the post-rotation width)
        labels_tmp = label_transpose_1(labels_tmp, w0, h0)
        # Rotation reverses the line ordering, so mirror the class list too.
        classes = classes[::-1]
        # Debug visualization, kept for reference:
        # for (a, b) in labels_tmp:
        #     _ = cv2.line(img, (int(a[0]), int(a[1])), (int(b[0]), int(b[1])), (0, 255, 0), thickness=2)
        # cv2.imwrite("sdf.jpg", img)
    h0, w0 = img.shape[:2]

    labels_tmp = labels_tmp[np.argsort(np.mean(labels_tmp[:, :, 0], axis=1))]  # sort lines by mean X ([:, :, 0] is X)
    for i in range(len(labels_tmp)):
        labels_tmp[i, :, :] = labels_tmp[i, :, :][np.argsort(labels_tmp[i, :, :][:, 1])]  # sort each line's endpoints by Y ([:, :, 1] is Y)

    img = resize_image(img, [NORMALIZATION_HEIGHT, NORMALIZATION_WIDTH])
    h1, w1, _ = img.shape
    img, ratio, pad = letterbox(img, [NORMALIZATION_HEIGHT, NORMALIZATION_WIDTH], auto=False, scaleFill=True)
    # letterbox pads/stretches to the target size; scaleFill=True stretches
    # (cf. letterbox(img, self.opt.img_size, auto=False, scaleup=False) usage elsewhere)
    labels_tmp = labels_convert_train(labels_tmp, w0, h0, w1, h1, ratio[0] * w1, ratio[1] * h1, pad[0], pad[1])

    h2, w2 = img.shape[:2]
    dict_json = {"filename": filename,
                 "lines": [[a[0],a[1],b[0],b[1]] for (a,b) in labels_tmp],
                 "classes":classes,
                 "height": h2, "width": w2}
    cv2.imwrite(os.path.join(images_dir, filename), img)
    return dict_json

def get_dict_json(imagePath):
    """Build the normalized json record(s) for one source image.

    Always emits one record for the original image. When the image is small
    (below 2100 px on its longest side — yellow variants are ~1400 px, blue
    ones ~2800 px), an auto-contrasted "blue" copy is generated as an extra
    augmented sample, prefixed "YB".
    """
    filename, filedir, filesuffix, filenamestem = get_filename_suf_pix(imagePath)
    img, labels_tmp, classes = get_origin_image_points(imagePath)

    results = [convert_img(img, labels_tmp, classes, filename)]

    # 2100 is the midpoint between the yellow (1400) and blue (2800) sizes.
    if max(img.shape) < 2100:
        blue_img = yellowToBlue(imagePath)
        results.append(convert_img(blue_img, labels_tmp, classes, "YB" + filename))

    return results

def package_dictjson(train_json, dict_jsons):
    """Flatten per-image record lists into `train_json`, in place.

    Each element of `dict_jsons` is the list of records that get_dict_json
    produced for a single image; all records are appended to `train_json`,
    which is also returned for convenience.
    """
    for records in dict_jsons:
        train_json += records
    return train_json

if __name__ == '__main__':
    # Seed python/numpy (and presumably torch — see init_seeds) for reproducibility.
    init_seeds(RANDOM_SEED)
    parser = argparse.ArgumentParser(description="获取杭州人工认为有缺陷大图")
    parser.add_argument('--data',
                        default=r"creepageDistance.yaml",
                        help="没有分的文件夹")
    parser.add_argument('--datasets_path',
                        default=r"/home/zengxh/datasets/creepageDistance",
                        help="没有分的文件夹")
    opt = parser.parse_args()

    # Output directory for the normalized images; read as a global by convert_img.
    images_dir = os.path.join(opt.datasets_path,"images")
    mkdir(images_dir)

    # Dataset configuration: yaml mapping dataset keys (e.g. "tb") to source dirs.
    with open(opt.data,encoding="utf-8") as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict

    train_json = []
    valid_json = []
    # NOTE: `key` and `data_dict_tmp` become module-level globals consumed by
    # convert_img / createDatasets while each dataset is processed.
    for (key, data_dict_tmp) in data_dict["datasets"].items():
        allimagepaths = list(paths.list_images(os.path.join(data_dict_tmp["allDatas"])))
        # Datasets used for training experiments.

        # "questions" lists known-problematic images; pull them out of the
        # random split so they can be forced into the training set below.
        hasquestion = data_dict_tmp.__contains__("questions")
        if hasquestion:
            all_img_names = [get_filename_suf_pix(imagepath)[0] for imagepath in allimagepaths]
            question_paths = []
            for question_path in paths.list_images(os.path.join(data_dict_tmp["questions"])):
                filename = get_filename_suf_pix(question_path)[0]
                if filename in all_img_names:
                    index = find_all_index(all_img_names, filename)[0]
                    question_paths.append(allimagepaths[index])
                    # Keep the two lists index-aligned while removing the match.
                    del all_img_names[index]
                    del allimagepaths[index]

        random.shuffle(allimagepaths)  # shuffle order (seeded via init_seeds above)
        # 5% of each dataset goes to validation; the duplicated y argument is unused.
        X_train, X_test_val, _, _ = train_test_split(allimagepaths, allimagepaths, test_size=0.05, random_state=RANDOM_SEED)

        # Sequential equivalent, kept for debugging:
        # for imagePath in X_train:
        #     dict_json = get_dict_json(imagePath)
        #     train_json.append(dict_json)
        # for imagePath in X_test_val:
        #     dict_json = get_dict_json(imagePath)
        #     train_json.append(dict_json)

        # Fan out image conversion over 32 worker processes.
        dict_jsons = parmap(get_dict_json, X_train, 32)
        train_json=package_dictjson(train_json, dict_jsons)
        dict_jsons = parmap(get_dict_json, X_test_val, 32)
        valid_json = package_dictjson(valid_json, dict_jsons)

        if hasquestion:
            # Problematic samples always go into training.
            dict_jsons = parmap(get_dict_json, question_paths, 32)
            train_json=package_dictjson(train_json, dict_jsons)

        # for imagePath in X_test_val:
        #     dict_json = get_dict_json(imagePath)
        #     valid_json.append(dict_json)

        # Deploy-time dataset build (used once the best tricks are finalized):
        # dict_jsons = parmap(get_dict_json, nameImgs, 32)
        # train_json.extend(dict_jsons)
        #
        # if data_dict_tmp.__contains__("test"):
        #     X_test = list(paths.list_images(os.path.join(data_dict_tmp["test"])))
        #     dict_jsons = parmap(get_dict_json, X_test, 32)
        #     valid_json.extend(dict_jsons)

    save_json_file(os.path.join(opt.datasets_path,"train.json"),train_json)
    save_json_file(os.path.join(opt.datasets_path,"valid.json"),valid_json)