# -*- encoding: utf-8 -*-
'''
@Filename    : auxiliary_tools.py
@Description : 
@Datatime    : 2022/07/06 13:33:47
@Author      : hqjin
'''

import os
from pathlib import Path

import cv2
import json
import math

import numpy
import numpy as np
import skimage
import file_utils
import img_utils
import argparse
import shutil
import ujson
from multiprocessing import Pool
from PIL import Image  # 用cv2读取大图可能会失败，所以需要PIL库，具体用法：src_img = np.array(Image.open(src_img_path))

from segment_anything.predictor_pool import PredictorPool
from segment_anything.samUtils import generate_mask_with_sam, addPoint, recall

Image.MAX_IMAGE_PIXELS = None
from shapely.geometry import Polygon


def invert_imgs(src_dir, dst_dir):
    """Invert pixel values (255 - v) of every non-thumbnail .bmp in src_dir.

    The raw EM images store intensities as 255-minus the real value, so the
    inverted copies saved to dst_dir are easier to inspect visually.
    """
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    for entry in os.listdir(src_dir):
        src_path = os.path.join(src_dir, entry)
        if not os.path.isfile(src_path):
            continue
        # Only plain .bmp tiles; thumbnail files are skipped.
        if entry.split(".")[-1] != "bmp" or "thumbnail" in entry:
            continue

        inverted = img_utils.img_pixel_value_invert(cv2.imread(src_path, 0))
        cv2.imwrite(os.path.join(dst_dir, entry), inverted)


def get_tile_layout_by_json(json_file, save_img_file):
    """Render the layout of the 61 tiles of one MFOV (from a tilespec json) as an image.

    Only the tiles belonging to the first section/MFOV pair in the json are
    used; their bboxes are downscaled 20x, translated to the origin, and drawn
    as numbered colored rectangles saved to save_img_file.
    """
    json_file = file_utils.get_abs_file_path(json_file)
    file_utils.create_save_dir_from_file(save_img_file)

    with open(json_file, "r") as f:
        tiles = json.load(f)

    if len(tiles) < 1 or len(tiles) % 61 != 0:
        raise Exception("Error! json_list less than 1 or not a multiple of 61.")

    # Only the 61 tiles of the first section/MFOV pair are required.
    target_layer = tiles[0]["layer"]
    target_mfov = tiles[0]["mfov"]

    idx_to_bbox = {}
    for tile in tiles:
        if tile["layer"] != target_layer or tile["mfov"] != target_mfov:
            continue
        # imageUrl looks like */001_000001_001_2022-05-08T1237143142689.bmp
        base_name = tile["mipmapLevels"]["0"]["imageUrl"].split("/")[-1]
        parts = base_name.split("_")
        if len(parts) != 4 or not parts[2].isdigit():
            raise Exception(
                "Error! Image name {} should be like 001_000001_001_2022-05-08T1237143142689.bmp.".format(
                    base_name))

        tile_idx = int(parts[2])
        if tile_idx in idx_to_bbox:
            raise Exception("Error! Repetitive section index {}".format(tile_idx))
        idx_to_bbox[tile_idx] = tile["bbox"]

    # Downscale every bbox ([left, right, top, bottom]) by a factor of 20.
    for tile_idx, bbox in idx_to_bbox.items():
        idx_to_bbox[tile_idx] = [int(math.floor(bbox[0] / 20)), int(math.ceil(bbox[1] / 20)),
                                 int(math.floor(bbox[2] / 20)), int(math.ceil(bbox[3] / 20))]

    min_x = min(b[0] for b in idx_to_bbox.values())
    min_y = min(b[2] for b in idx_to_bbox.values())
    max_x = max(0, max(b[1] for b in idx_to_bbox.values()))
    max_y = max(0, max(b[3] for b in idx_to_bbox.values()))

    # Translate all boxes so the layout starts at the origin.
    for tile_idx, bbox in idx_to_bbox.items():
        idx_to_bbox[tile_idx] = [bbox[0] - min_x, bbox[1] - min_x, bbox[2] - min_y, bbox[3] - min_y]

    canvas = np.ones([max_y - min_y, max_x - min_x, 3], np.uint8) * 255
    for tile_idx, bbox in idx_to_bbox.items():
        color = np.random.randint(0, 255, 3, dtype=np.uint8)
        bgr = (int(color[0]), int(color[1]), int(color[2]))
        cv2.rectangle(canvas, (bbox[0], bbox[2]), (bbox[1], bbox[3]), bgr, 2)
        # Shift the label left a bit more for two-digit indices.
        x_shift = 12 if tile_idx < 10 else 32
        txt_center = (int((bbox[0] + bbox[1]) / 2) - x_shift, int((bbox[2] + bbox[3]) / 2) + 15)
        cv2.putText(canvas, str(tile_idx), txt_center, cv2.FONT_HERSHEY_SIMPLEX, 2, bgr, 2)

    cv2.imwrite(save_img_file, canvas)


def get_mFov_layout_by_json(json_file, save_img_file):
    """Render the MFOV layout of one section (from a tilespec json) as an image.

    Consecutive tiles sharing the same mfov index are merged into one union
    bbox, downscaled 200x, translated to the origin, and drawn as numbered
    colored rectangles saved to save_img_file.
    """
    json_file = file_utils.get_abs_file_path(json_file)
    file_utils.create_save_dir_from_file(save_img_file)

    with open(json_file, "r") as f:
        tiles = json.load(f)

    if len(tiles) < 1 or len(tiles) % 61 != 0:
        raise Exception("Error! json_list less than 1 or not a multiple of 61.")

    # All tiles must belong to the same section (layer).
    target_layer = tiles[0]["layer"]
    for tile in tiles:
        if tile["layer"] != target_layer:
            raise Exception("Error! Find different layer in the json_file.")

    # Group the bboxes of consecutive tiles that share the same mfov index.
    idx_to_bbox = {}
    run_idx, run_bboxes = tiles[0]["mfov"], []
    for tile in tiles:
        if tile["mfov"] != run_idx:
            idx_to_bbox[run_idx] = run_bboxes
            run_idx, run_bboxes = tile["mfov"], []
        run_bboxes.append(tile["bbox"])
    idx_to_bbox[run_idx] = run_bboxes

    # Merge each group into one union bbox ([left, right, top, bottom]) and
    # downscale it by a factor of 200.
    for mfov, bboxes in idx_to_bbox.items():
        arr = np.array(bboxes)
        union = [np.min(arr[:, 0]), np.max(arr[:, 1]), np.min(arr[:, 2]), np.max(arr[:, 3])]
        idx_to_bbox[mfov] = [int(math.floor(union[0] / 200)), int(math.ceil(union[1] / 200)),
                             int(math.floor(union[2] / 200)), int(math.ceil(union[3] / 200))]

    min_x = min(b[0] for b in idx_to_bbox.values())
    min_y = min(b[2] for b in idx_to_bbox.values())
    max_x = max(0, max(b[1] for b in idx_to_bbox.values()))
    max_y = max(0, max(b[3] for b in idx_to_bbox.values()))

    # Translate all boxes so the layout starts at the origin.
    for mfov, bbox in idx_to_bbox.items():
        idx_to_bbox[mfov] = [bbox[0] - min_x, bbox[1] - min_x, bbox[2] - min_y, bbox[3] - min_y]

    canvas = np.ones([max_y - min_y, max_x - min_x, 3], np.uint8) * 255
    for mfov, bbox in idx_to_bbox.items():
        color = np.random.randint(0, 255, 3, dtype=np.uint8)
        bgr = (int(color[0]), int(color[1]), int(color[2]))
        cv2.rectangle(canvas, (bbox[0], bbox[2]), (bbox[1], bbox[3]), bgr, 2)
        # Shift the label left a bit more for two-digit indices.
        x_shift = 18 if mfov < 10 else 38
        txt_center = (int((bbox[0] + bbox[1]) / 2) - x_shift, int((bbox[2] + bbox[3]) / 2) + 18)
        cv2.putText(canvas, str(mfov), txt_center, cv2.FONT_HERSHEY_SIMPLEX, 2, bgr, 2)

    cv2.imwrite(save_img_file, canvas)


def get_mFov_layout_by_txt(txt_file, save_img_file):
    """Render the MFOV layout described by a full_image_coordinates txt file as an image.

    Each valid line carries a tile path (starting with the mfov index), an x
    and a y offset; tile bboxes are built from a fixed 3876x3376 tile size,
    merged per mfov, downscaled 200x and drawn as numbered rectangles.
    """
    txt_file = file_utils.get_abs_file_path(txt_file)
    file_utils.create_save_dir_from_file(save_img_file)

    mfov_indices = []
    xs, ys = [], []
    with open(txt_file, "r") as f:
        for line in sorted(f.readlines()):
            fields = line.strip().split("\t")
            rel_path = fields[0].replace("\\", "/")
            # Lines whose first path component is not a number are skipped.
            if not rel_path.split("/")[0].isdigit():
                continue

            mfov_indices.append(int(rel_path.split("/")[0]))
            xs.append(float(fields[1]))
            ys.append(float(fields[2]))

    xs = np.array(xs)
    ys = np.array(ys)
    xs -= np.min(xs)
    ys -= np.min(ys)

    # Group consecutive tiles that share the same mfov index; each tile is
    # 3876x3376 pixels.
    idx_to_bbox = {}
    run_idx, run_bboxes = mfov_indices[0], []
    for idx, x, y in zip(mfov_indices, xs, ys):
        if idx != run_idx:
            idx_to_bbox[run_idx] = run_bboxes
            run_idx, run_bboxes = idx, []
        run_bboxes.append([int(math.floor(x)), int(math.ceil(x + 3876)),
                           int(math.floor(y)), int(math.ceil(y + 3376))])  # left, right, top, down
    idx_to_bbox[run_idx] = run_bboxes

    # Merge each group into one union bbox and downscale it by a factor of 200.
    for mfov, bboxes in idx_to_bbox.items():
        arr = np.array(bboxes)
        union = [np.min(arr[:, 0]), np.max(arr[:, 1]), np.min(arr[:, 2]), np.max(arr[:, 3])]
        idx_to_bbox[mfov] = [int(math.floor(union[0] / 200)), int(math.ceil(union[1] / 200)),
                             int(math.floor(union[2] / 200)), int(math.ceil(union[3] / 200))]

    max_x = max(0, max(b[1] for b in idx_to_bbox.values()))
    max_y = max(0, max(b[3] for b in idx_to_bbox.values()))

    canvas = np.ones([max_y, max_x, 3], np.uint8) * 255
    for mfov, bbox in idx_to_bbox.items():
        color = np.random.randint(0, 255, 3, dtype=np.uint8)
        bgr = (int(color[0]), int(color[1]), int(color[2]))
        cv2.rectangle(canvas, (bbox[0], bbox[2]), (bbox[1], bbox[3]), bgr, 2)
        # Shift the label left a bit more for two-digit indices.
        x_shift = 18 if mfov < 10 else 38
        txt_center = (int((bbox[0] + bbox[1]) / 2) - x_shift, int((bbox[2] + bbox[3]) / 2) + 18)
        cv2.putText(canvas, str(mfov), txt_center, cv2.FONT_HERSHEY_SIMPLEX, 2, bgr, 2)

    cv2.imwrite(save_img_file, canvas)


def check_rect_in_img_max_contour(img_path, rect):
    """对每一个mFov图片提取最大轮廓, 并判断指定的rect是否在轮廓内, 若在, 返回True; 否则, 返回False.
    其中, rect:[left, top, width, height]"""
    img_path = file_utils.get_abs_file_path(img_path)

    if rect[0] < 0:
        rect[0] = 0
    if rect[1] < 0:
        rect[1] = 0
    if rect[2] <= 0 or rect[3] <= 0:
        raise Exception("Error! {} is illegal!".format(rect))

    img = cv2.imread(img_path, 0)
    # 阈值化，有效区域为255，无效区域为0
    ret, thres_img = cv2.threshold(img, 254, 255, cv2.THRESH_BINARY_INV)

    tmp_str_list = img_path.rsplit(".", 1)

    # # 在灰度图上画框查看位置
    # cv2.rectangle(img, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), 0, 10)
    # cv2.imwrite(tmp_str_list[0] + "_tmp." + tmp_str_list[1], img)

    contours, hierarchy = cv2.findContours(thres_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = list(contours)
    contours.sort(key=lambda c: cv2.contourArea(c), reverse=True)

    # # 画最大轮廓
    # thres_img2 = np.zeros(thres_img.shape, np.uint8)
    # cv2.drawContours(thres_img2, contours, 0, 255, cv2.FILLED)
    # cv2.imwrite(tmp_str_list[0] + "_tmp2." + tmp_str_list[1], thres_img2)

    if img.shape[0] < rect[1] + rect[3] or img.shape[1] < rect[0] + rect[2]:
        print("Warning! rect {} exceed img size {} of {}.".format(rect, img.shape, img_path))
        return False

    for x in range(int(rect[0]), int(rect[0] + rect[2])):
        ret = cv2.pointPolygonTest(contours[0], (x, rect[1]), False)
        if ret < 0:
            print("Warning! rect {} not in the contour of img {}.".format(rect, img_path))
            return False

    for x in range(int(rect[0]), int(rect[0] + rect[2])):
        ret = cv2.pointPolygonTest(contours[0], (x, rect[1] + rect[3]), False)
        if ret < 0:
            print("Warning! rect {} not in the contour of img {}.".format(rect, img_path))
            return False

    for y in range(int(rect[1]), int(rect[1] + rect[3])):
        ret = cv2.pointPolygonTest(contours[0], (rect[0], y), False)
        if ret < 0:
            print("Warning! rect {} not in the contour of img {}.".format(rect, img_path))
            return False

    for y in range(int(rect[1]), int(rect[1] + rect[3])):
        ret = cv2.pointPolygonTest(contours[0], (rect[0] + rect[2], y), False)
        if ret < 0:
            print("Warning! rect {} not in the contour of img {}.".format(rect, img_path))
            return False

    return True


def loop_check_rect_in_img_max_contour(img_dir, rect_w, rect_h):
    """For every image in the sub-directories of img_dir, test whether a
    centered rect_w x rect_h rectangle lies inside the image's largest contour,
    and print the overall failure ratio.

    The rectangle is given only by width and height; its top-left corner is
    computed per image so that the rectangle is centered on that image.
    """
    img_dir = file_utils.get_abs_dir(img_dir)

    rect_w = int(rect_w)
    rect_h = int(rect_h)
    total_img_num, dissatisfy_img_num = 0, 0
    for sub_dir in os.listdir(img_dir):
        abs_sub_dir = os.path.join(img_dir, sub_dir)
        if not os.path.isdir(abs_sub_dir):
            continue

        for file in os.listdir(abs_sub_dir):
            abs_file_path = os.path.join(abs_sub_dir, file)
            if not os.path.isfile(abs_file_path):
                continue
            if file.split(".")[-1].lower() not in ["jpg", "jpeg", "png", "bmp"]:
                continue

            img = cv2.imread(abs_file_path, 0)
            # Center the rect on the image.
            rect_x = int(img.shape[1] / 2 - rect_w / 2)
            rect_y = int(img.shape[0] / 2 - rect_h / 2)
            rect = [rect_x, rect_y, rect_w, rect_h]
            if not check_rect_in_img_max_contour(abs_file_path, rect):
                dissatisfy_img_num += 1
            total_img_num += 1

    # Guard against ZeroDivisionError when no image was found at all.
    if total_img_num == 0:
        print("total img num: 0, dissatisfy img num: 0")
        return

    print("total img num: {}, dissatisfy img num: {}, ratio: {}%".format(total_img_num, dissatisfy_img_num,
                                                                         round(dissatisfy_img_num / total_img_num * 100,
                                                                               2)))


def crop_img_by_rect(rect, src_img_path, dst_img_path):
    """Crop the region rect=[left, top, width, height] from src_img_path and save it.

    Negative left/top are clamped to 0; a rect extending past the image border
    only triggers a warning (numpy slicing clips the crop to the image).
    """
    src_img_path = file_utils.get_abs_file_path(src_img_path)
    file_utils.create_save_dir_from_file(dst_img_path)

    rect[0] = max(rect[0], 0)
    rect[1] = max(rect[1], 0)
    if rect[2] <= 0 or rect[3] <= 0:
        raise Exception("Error! {} is illegal!".format(rect))

    src_img = cv2.imread(src_img_path, 0)
    img_h, img_w = src_img.shape[:2]
    if img_h < rect[1] + rect[3] or img_w < rect[0] + rect[2]:
        print("Warning! rect {} exceed img size {} of {}.".format(rect, src_img.shape[:2], src_img_path))

    cropped = src_img[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]]
    cv2.imwrite(dst_img_path, cropped)


def loop_crop_img_by_rect(rect_w, rect_h, src_img_dir, dst_img_dir, dst_img_name_prefix=""):
    """Crop a centered rect_w x rect_h region from every image found in the
    sub-directories of src_img_dir and save the crops into dst_img_dir.

    The crop rectangle is given only by width and height; its top-left corner
    is derived per image so that the rectangle is centered on that image.
    """
    src_img_dir = file_utils.get_abs_dir(src_img_dir)
    dst_img_dir = file_utils.create_dir(dst_img_dir)

    img_suffixes = ["jpg", "jpeg", "png", "bmp"]
    for sub_dir in os.listdir(src_img_dir):
        abs_sub_dir = os.path.join(src_img_dir, sub_dir)
        if not os.path.isdir(abs_sub_dir):
            continue

        for fname in os.listdir(abs_sub_dir):
            src_path = os.path.join(abs_sub_dir, fname)
            if not os.path.isfile(src_path):
                continue
            if fname.split(".")[-1].lower() not in img_suffixes:
                continue

            img = cv2.imread(src_path, 0)
            # Center the rect on the image.
            left = int(img.shape[1] / 2 - rect_w / 2)
            top = int(img.shape[0] / 2 - rect_h / 2)
            dst_path = os.path.join(dst_img_dir, dst_img_name_prefix + os.path.basename(src_path))
            crop_img_by_rect([left, top, rect_w, rect_h], src_path, dst_path)


def convert_tif(src_tif_path, dst_tif_path):
    """Convert one optical-microscope tif to match the EM image convention.

    The original optical images are uint16 tifs whose pixel values are
    inverted and whose orientation is point-symmetric relative to the EM
    images, so the image is value-inverted, centrally mirrored and converted
    to uint8 before saving.
    """
    src_tif_path = file_utils.get_abs_file_path(src_tif_path)
    file_utils.create_save_dir_from_file(dst_tif_path)

    # NOTE: read optical tifs with skimage.io.imread(), not cv2.imread().
    converted = skimage.io.imread(src_tif_path)
    converted = img_utils.img_pixel_value_invert(converted)    # invert pixel values first
    converted = img_utils.get_central_symmetry_img(converted)  # then apply central symmetry
    converted = img_utils.convert_img_to_uint8(converted)      # then uint16 -> uint8
    cv2.imwrite(dst_tif_path, converted)


def loop_convert_tif(src_tif_dir, dst_tif_dir):
    """轮询转换tif, 并保存成png图片"""
    src_tif_dir = file_utils.get_abs_dir(src_tif_dir)
    dst_tif_dir = file_utils.create_dir(dst_tif_dir)

    for file in os.listdir(src_tif_dir):
        abs_file_path = os.path.join(src_tif_dir, file)
        if not os.path.isfile(abs_file_path):
            continue
        tmp_list = file.split(".")
        if tmp_list[-1].lower() != "tif":
            continue
        dst_tif_name = file.replace(".tif", ".png")
        dst_tif_path = os.path.join(dst_tif_dir, dst_tif_name)
        convert_tif(abs_file_path, dst_tif_path)


def draw_img_center(src_img_path, dst_img_path):
    """Mark the center of the image at src_img_path with a white circle and save it."""
    src_img_path = file_utils.get_abs_file_path(src_img_path)
    file_utils.create_save_dir_from_file(dst_img_path)

    img = cv2.imread(src_img_path, 0)
    center = (int(img.shape[1] / 2), int(img.shape[0] / 2))
    cv2.circle(img, center, 30, 255, 10)  # radius 30, white, thickness 10
    cv2.imwrite(dst_img_path, img)


def crop_optics_img_sec_to_mFovs(optics_img_path, full_image_coordinates_txt_file, save_img_dir, first_mFov_center):
    """Cut one whole optical-microscope (OM) section image into per-mFov crops.

    The mFov positions are inferred from the tile coordinates of the original
    electron-microscope (EM) section (full_image_coordinates txt) together
    with the manually located center of the first mFov in the OM image
    (first_mFov_center): each later OM center is obtained by scaling the EM
    center displacement, then a fixed-size patch is cropped around it.
    """
    optics_img_path = file_utils.get_abs_file_path(optics_img_path)
    full_image_coordinates_txt_file = file_utils.get_abs_file_path(full_image_coordinates_txt_file)
    save_img_dir = file_utils.create_dir(save_img_dir)

    # Read each tile's mFov index and top-left coordinate from the original
    # full_image_coordinates txt file.
    mFov_idx = []
    start_x, start_y = [], []
    with open(full_image_coordinates_txt_file, "r") as f:
        lines = f.readlines()
        lines = sorted(lines)
        for line in lines:
            line_list = line.strip().split("\t")
            img_fname = line_list[0].replace("\\", "/")
            # Lines whose first path component is not numeric are skipped.
            if not (img_fname.split("/")[0]).isdigit():
                continue

            mFov_idx.append(int(img_fname.split("/")[0]))
            start_x.append(float(line_list[1]))
            start_y.append(float(line_list[2]))

    start_x = np.array(start_x)
    start_y = np.array(start_y)
    start_x -= np.min(start_x)
    start_y -= np.min(start_y)

    # Group consecutive tile bboxes that belong to the same mFov; each tile is
    # 3876x3376 pixels.
    em_idx_bbox_dict = {}
    cur_mFov_idx = mFov_idx[0]
    cur_mFov_bbox = []
    for idx, x, y in zip(mFov_idx, start_x, start_y):
        if idx == cur_mFov_idx:
            cur_mFov_bbox.append([int(math.floor(x)), int(math.ceil(x + 3876)), int(math.floor(y)),
                                  int(math.ceil(y + 3376))])  # left, right, top, down
        else:
            em_idx_bbox_dict[cur_mFov_idx] = cur_mFov_bbox
            cur_mFov_idx = idx
            cur_mFov_bbox = []
            cur_mFov_bbox.append(
                [int(math.floor(x)), int(math.ceil(x + 3876)), int(math.floor(y)), int(math.ceil(y + 3376))])
    em_idx_bbox_dict[cur_mFov_idx] = cur_mFov_bbox

    # Merge each mFov's tile bboxes into one union bbox.
    for idx, bbox in em_idx_bbox_dict.items():
        bbox = np.array(bbox)
        bbox = [np.min(bbox[:, 0]), np.max(bbox[:, 1]), np.min(bbox[:, 2]), np.max(bbox[:, 3])]
        em_idx_bbox_dict[idx] = bbox

    # The stitching result is downsampled by a factor of 2, so divide the
    # coordinates by 2.
    for idx, bbox in em_idx_bbox_dict.items():
        bbox[0] = int(math.floor(bbox[0] / 2))
        bbox[1] = int(math.ceil(bbox[1] / 2))
        bbox[2] = int(math.floor(bbox[2] / 2))
        bbox[3] = int(math.ceil(bbox[3] / 2))
        em_idx_bbox_dict[idx] = bbox

    # Infer each mFov's position in the OM image from the original EM mFov
    # boxes. The original mFov box size only approximates the stitched mFov
    # image size. A 10800*10800 region (8 nm per pixel) is cropped near the
    # center of each stitched mFov image, which corresponds to about 250*250
    # in the OM image; the OM crop is enlarged by 1.5x to 375*375. Starting
    # from the given first OM mFov center, every later OM center is derived
    # from the EM center displacement.
    om_idx_center_dict = {}
    for idx, bbox in em_idx_bbox_dict.items():
        if not om_idx_center_dict:  # the first OM mFov center is the given first_mFov_center
            om_idx_center_dict[idx] = (int(first_mFov_center[0]), int(first_mFov_center[1]))
        else:
            # NOTE(review): assumes mFov indices are consecutive so that
            # idx - 1 exists in both dicts -- confirm for the input data.
            pre_em_mFov_bbox = em_idx_bbox_dict[idx - 1]
            pre_em_mFov_center = (
                int((pre_em_mFov_bbox[0] + pre_em_mFov_bbox[1]) / 2),
                int((pre_em_mFov_bbox[2] + pre_em_mFov_bbox[3]) / 2))
            cur_em_mFov_bbox = em_idx_bbox_dict[idx]
            cur_em_mFov_center = (
                int((cur_em_mFov_bbox[0] + cur_em_mFov_bbox[1]) / 2),
                int((cur_em_mFov_bbox[2] + cur_em_mFov_bbox[3]) / 2))

            pre_om_mFov_center = om_idx_center_dict[idx - 1]
            # 43.125 is the EM-to-OM displacement scale -- presumably the
            # OM/EM resolution ratio after the 2x downsampling above; TODO confirm.
            cur_om_mFov_center_x = int((cur_em_mFov_center[0] - pre_em_mFov_center[0]) / 43.125 + pre_om_mFov_center[0])
            cur_om_mFov_center_y = int((cur_em_mFov_center[1] - pre_em_mFov_center[1]) / 43.125 + pre_om_mFov_center[1])
            om_idx_center_dict[idx] = (cur_om_mFov_center_x, cur_om_mFov_center_y)

    optics_img = cv2.imread(optics_img_path, cv2.IMREAD_UNCHANGED)
    om_idx_bbox_dict = {}
    crop_size = 375
    for idx, center in om_idx_center_dict.items():
        # Keep the bbox inside the image bounds.
        bbox_x0 = max(0, center[0] - int(crop_size / 2))
        bbox_x1 = min(bbox_x0 + crop_size, optics_img.shape[1])
        bbox_y0 = max(0, center[1] - int(crop_size / 2))
        bbox_y1 = min(bbox_y0 + crop_size, optics_img.shape[0])
        # Force the bbox size to 375*375.
        if bbox_x1 - bbox_x0 < crop_size:
            if bbox_x0 == 0:
                bbox_x1 = crop_size
            else:
                bbox_x0 = bbox_x1 - crop_size
        if bbox_y1 - bbox_y0 < crop_size:
            if bbox_y0 == 0:
                bbox_y1 = crop_size
            else:
                bbox_y0 = bbox_y1 - crop_size
        om_idx_bbox_dict[idx] = [bbox_x0, bbox_x1, bbox_y0, bbox_y1]

    # # Draw the boxes to visually check that they are correct
    # for idx, bbox in om_idx_bbox_dict.items():
    #     cv2.rectangle(optics_img, (bbox[0], bbox[2]), (bbox[1], bbox[3]), 0, 2)
    #     if idx < 10:
    #         txt_center = (int((bbox[0] + bbox[1]) / 2) - 18, int((bbox[2] + bbox[3]) / 2) + 18)
    #     else:
    #         txt_center = (int((bbox[0] + bbox[1]) / 2) - 38, int((bbox[2] + bbox[3]) / 2) + 18)
    #     cv2.putText(optics_img, str(idx), txt_center, cv2.FONT_HERSHEY_SIMPLEX, 2, 0, 2)
    # cv2.imwrite("/media/hqjin/Elements/OEunion_data/Image-1-(2)_tmp.png", optics_img)

    # The OM image name looks like Image-1-0001.png; the crops are saved as
    # Image-1-S1M*.png.
    optics_img_name = os.path.basename(optics_img_path)  # optics_img_name like: Image-1-0001.png
    tmp_list = optics_img_name.split("-")
    sec_idx = int(tmp_list[-1].split(".")[0])
    crop_img_name_prefix = "-".join(tmp_list[:-1]) + "-S" + str(sec_idx)
    for idx, bbox in om_idx_bbox_dict.items():
        crop_img = optics_img[bbox[2]:bbox[3], bbox[0]:bbox[1]]
        crop_img_name = os.path.join(save_img_dir,
                                     crop_img_name_prefix + "M" + str(idx) + "." + optics_img_name.split(".")[-1])
        cv2.imwrite(crop_img_name, crop_img)


def crop_optics_img_sec_to_mFovs2(optics_img_path, full_image_coordinates_txt_file, save_img_dir,
                                  first_three_mFov_center):
    """Cut one whole optical-microscope (OM) section image into per-mFov crops.

    Fits an affine transform from the EM mFov centers (derived from the
    full_image_coordinates txt) to the manually located centers of the first
    THREE mFovs in the OM image (first_three_mFov_center), applies it to every
    other EM center to obtain the OM centers, then crops a fixed-size patch
    around each one.

    NOTE: in practice this approach proved unreliable, because some computed
    centers fall outside the image boundary.
    """
    optics_img_path = file_utils.get_abs_file_path(optics_img_path)
    full_image_coordinates_txt_file = file_utils.get_abs_file_path(full_image_coordinates_txt_file)
    save_img_dir = file_utils.create_dir(save_img_dir)

    # Read each tile's mFov index and top-left coordinate from the original
    # full_image_coordinates txt file.
    mFov_idx = []
    start_x, start_y = [], []
    with open(full_image_coordinates_txt_file, "r") as f:
        lines = f.readlines()
        lines = sorted(lines)
        for line in lines:
            line_list = line.strip().split("\t")
            img_fname = line_list[0].replace("\\", "/")
            if not (img_fname.split("/")[0]).isdigit():
                continue

            mFov_idx.append(int(img_fname.split("/")[0]))
            start_x.append(float(line_list[1]))
            start_y.append(float(line_list[2]))

    start_x = np.array(start_x)
    start_y = np.array(start_y)
    start_x -= np.min(start_x)
    start_y -= np.min(start_y)

    # Group consecutive tile bboxes that belong to the same mFov; each tile is
    # 3876x3376 pixels.
    em_idx_bbox_dict = {}
    cur_mFov_idx = mFov_idx[0]
    cur_mFov_bbox = []
    for idx, x, y in zip(mFov_idx, start_x, start_y):
        if idx == cur_mFov_idx:
            cur_mFov_bbox.append([int(math.floor(x)), int(math.ceil(x + 3876)), int(math.floor(y)),
                                  int(math.ceil(y + 3376))])  # left, right, top, down
        else:
            em_idx_bbox_dict[cur_mFov_idx] = cur_mFov_bbox
            cur_mFov_idx = idx
            cur_mFov_bbox = []
            cur_mFov_bbox.append(
                [int(math.floor(x)), int(math.ceil(x + 3876)), int(math.floor(y)), int(math.ceil(y + 3376))])
    em_idx_bbox_dict[cur_mFov_idx] = cur_mFov_bbox

    # Merge each mFov's tile bboxes into one union bbox.
    for idx, bbox in em_idx_bbox_dict.items():
        bbox = np.array(bbox)
        bbox = [np.min(bbox[:, 0]), np.max(bbox[:, 1]), np.min(bbox[:, 2]), np.max(bbox[:, 3])]
        em_idx_bbox_dict[idx] = bbox

    # The stitching result is downsampled by a factor of 2, so divide the
    # coordinates by 2.
    for idx, bbox in em_idx_bbox_dict.items():
        bbox[0] = int(math.floor(bbox[0] / 2))
        bbox[1] = int(math.ceil(bbox[1] / 2))
        bbox[2] = int(math.floor(bbox[2] / 2))
        bbox[3] = int(math.ceil(bbox[3] / 2))
        em_idx_bbox_dict[idx] = bbox

    # The affine transform is fitted from the EM centers of mFovs 1, 2 AND 3;
    # the previous check forgot index 3 and crashed with a KeyError below when
    # it was missing, so all three are verified here.
    if any(k not in em_idx_bbox_dict for k in (1, 2, 3)):
        raise Exception("Error! 1, 2 or 3 not in em_idx_bbox_dict.keys().")

    em_mFov1_center_x = (em_idx_bbox_dict[1][0] + em_idx_bbox_dict[1][1]) / 2
    em_mFov1_center_y = (em_idx_bbox_dict[1][2] + em_idx_bbox_dict[1][3]) / 2
    em_mFov2_center_x = (em_idx_bbox_dict[2][0] + em_idx_bbox_dict[2][1]) / 2
    em_mFov2_center_y = (em_idx_bbox_dict[2][2] + em_idx_bbox_dict[2][3]) / 2
    em_mFov3_center_x = (em_idx_bbox_dict[3][0] + em_idx_bbox_dict[3][1]) / 2
    em_mFov3_center_y = (em_idx_bbox_dict[3][2] + em_idx_bbox_dict[3][3]) / 2

    src_pts = np.float32([[em_mFov1_center_x, em_mFov1_center_y], [em_mFov2_center_x, em_mFov2_center_y],
                          [em_mFov3_center_x, em_mFov3_center_y]])
    dst_pts = np.float32(first_three_mFov_center)
    matrix = cv2.getAffineTransform(src_pts, dst_pts)

    om_idx_center_dict = {}
    for idx, bbox in em_idx_bbox_dict.items():
        if idx == 1:
            om_idx_center_dict[idx] = [int(first_three_mFov_center[0][0]), int(first_three_mFov_center[0][1])]
        elif idx == 2:
            om_idx_center_dict[idx] = [int(first_three_mFov_center[1][0]), int(first_three_mFov_center[1][1])]
        elif idx == 3:
            om_idx_center_dict[idx] = [int(first_three_mFov_center[2][0]), int(first_three_mFov_center[2][1])]
        else:
            # Map the EM center into OM coordinates with the affine transform.
            cur_em_mFov_center_x = (em_idx_bbox_dict[idx][0] + em_idx_bbox_dict[idx][1]) / 2
            cur_em_mFov_center_y = (em_idx_bbox_dict[idx][2] + em_idx_bbox_dict[idx][3]) / 2
            cur_om_mFov_center_x = int(
                matrix[0][0] * cur_em_mFov_center_x + matrix[0][1] * cur_em_mFov_center_y + matrix[0][2])
            cur_om_mFov_center_y = int(
                matrix[1][0] * cur_em_mFov_center_x + matrix[1][1] * cur_em_mFov_center_y + matrix[1][2])
            om_idx_center_dict[idx] = [cur_om_mFov_center_x, cur_om_mFov_center_y]

    optics_img = cv2.imread(optics_img_path, cv2.IMREAD_UNCHANGED)
    om_idx_bbox_dict = {}
    crop_size = 375
    for idx, center in om_idx_center_dict.items():
        # Keep the bbox inside the image bounds.
        bbox_x0 = max(0, center[0] - int(crop_size / 2))
        bbox_x1 = min(bbox_x0 + crop_size, optics_img.shape[1])
        bbox_y0 = max(0, center[1] - int(crop_size / 2))
        bbox_y1 = min(bbox_y0 + crop_size, optics_img.shape[0])
        # Force the bbox size to 375*375.
        if bbox_x1 - bbox_x0 < crop_size:
            if bbox_x0 == 0:
                bbox_x1 = crop_size
            else:
                bbox_x0 = bbox_x1 - crop_size
        if bbox_y1 - bbox_y0 < crop_size:
            if bbox_y0 == 0:
                bbox_y1 = crop_size
            else:
                bbox_y0 = bbox_y1 - crop_size
        om_idx_bbox_dict[idx] = [bbox_x0, bbox_x1, bbox_y0, bbox_y1]

    # # Draw the boxes to visually check that they are correct
    # for idx, bbox in om_idx_bbox_dict.items():
    #     cv2.rectangle(optics_img, (bbox[0], bbox[2]), (bbox[1], bbox[3]), 0, 2)
    #     if idx < 10:
    #         txt_center = (int((bbox[0] + bbox[1]) / 2) - 18, int((bbox[2] + bbox[3]) / 2) + 18)
    #     else:
    #         txt_center = (int((bbox[0] + bbox[1]) / 2) - 38, int((bbox[2] + bbox[3]) / 2) + 18)
    #     cv2.putText(optics_img, str(idx), txt_center, cv2.FONT_HERSHEY_SIMPLEX, 2, 0, 2)
    # cv2.imwrite("/media/hqjin/Elements/OEunion_data/Image-1-(2)_tmp.png", optics_img)

    # The OM image name looks like Image-1-0001.png; the crops are saved as
    # Image-1-S1M*.png.
    optics_img_name = os.path.basename(optics_img_path)  # optics_img_name like: Image-1-0001.png
    tmp_list = optics_img_name.split("-")
    sec_idx = int(tmp_list[-1].split(".")[0])
    crop_img_name_prefix = "-".join(tmp_list[:-1]) + "-S" + str(sec_idx)
    for idx, bbox in om_idx_bbox_dict.items():
        crop_img = optics_img[bbox[2]:bbox[3], bbox[0]:bbox[1]]
        crop_img_name = os.path.join(save_img_dir,
                                     crop_img_name_prefix + "M" + str(idx) + "." + optics_img_name.split(".")[-1])
        cv2.imwrite(crop_img_name, crop_img)


def crop_optics_img_sec(em_img_path, om_img_path, em_resolution, em_zip_scale, om_resolution, om_img_center,
                        om_img_save_dir):
    """Crop the OM region matching a whole stitched EM section and save it as tiles.

    The stitched EM section image (downscaled on disk by em_zip_scale) defines
    the physical extent; given the manually found center of the corresponding
    region in the OM image (om_img_center), the matching OM region is located
    via the resolution ratio, then cut into blocks corresponding to 5120-px EM
    blocks and each block image is saved separately.
    """
    om_img_save_dir = file_utils.create_dir(om_img_save_dir)
    em_img = cv2.imread(em_img_path, 0)
    em_h, em_w = em_img.shape
    # Full-resolution EM size (the image on disk is downscaled by em_zip_scale).
    em_real_h, em_real_w = round(em_h / em_zip_scale), round(em_w / em_zip_scale)

    em_block_size = 5120
    # # Draw the EM grid image; each EM block is 5120*5120
    # block_size = int(em_block_size * em_zip_scale)
    # row_block_num, col_block_num = int(em_h / block_size), int(em_w / block_size)
    # for i in range(1, row_block_num + 1):
    #     cv2.line(em_img, (0, i * block_size), (em_w, i * block_size), 0, 2)
    # for i in range(1, col_block_num + 1):
    #     cv2.line(em_img, (i * block_size, 0), (i * block_size, em_h), 0, 2)
    # em_block_img_path = os.path.join(om_img_save_dir, os.path.basename(em_img_path))
    # cv2.imwrite(em_block_img_path, em_img)

    # OM size corresponding to the full-resolution EM section, centered on the
    # manually found om_img_center.
    cor_om_h = round(em_real_h * em_resolution / om_resolution)
    cor_om_w = round(em_real_w * em_resolution / om_resolution)
    left = int(om_img_center[0] - cor_om_w / 2)
    top = int(om_img_center[1] - cor_om_h / 2)
    cor_om_bbox = [left, left + cor_om_w, top, top + cor_om_h]  # left, right, top, down

    om_img = cv2.imread(om_img_path, 0)
    om_img_save_img = om_img[cor_om_bbox[2]:cor_om_bbox[3], cor_om_bbox[0]:cor_om_bbox[1]]
    # om_img_save_path = os.path.join(om_img_save_dir, os.path.basename(om_img_path))
    # cv2.imwrite(om_img_save_path, om_img_save_img)  # save the matching OM region

    # OM block size corresponding to one 5120-px EM block.
    block_size = int(em_block_size * em_resolution / om_resolution)
    row_block_num, col_block_num = int(om_img_save_img.shape[0] / block_size), int(
        om_img_save_img.shape[1] / block_size)

    # # Draw the OM grid image
    # for i in range(1, row_block_num + 1):
    #     cv2.line(om_img_save_img, (0, i * block_size), (om_img_save_img.shape[1], i * block_size), 0, 2)
    # for i in range(1, col_block_num + 1):
    #     cv2.line(om_img_save_img, (i * block_size, 0), (i * block_size, om_img_save_img.shape[0]), 0, 2)
    # om_block_img_name_list = os.path.basename(om_img_path).rsplit(".", 1)
    # om_block_img_name = om_block_img_name_list[0] + "_grid." + om_block_img_name_list[1]
    # om_block_img_path = os.path.join(om_img_save_dir, om_block_img_name)
    # cv2.imwrite(om_block_img_path, om_img_save_img)

    # Save every block into a per-section sub-directory; the +1 range keeps
    # partial right/bottom edge blocks (as smaller crops), empty ones skipped.
    om_img_name_list = os.path.basename(om_img_path).rsplit(".", 1)
    tmp_om_img_idx_str = om_img_name_list[0].rsplit("-", 1)[1]
    tmp_om_img_dir = os.path.join(om_img_save_dir, tmp_om_img_idx_str)
    tmp_om_img_dir = file_utils.create_dir(tmp_om_img_dir)
    for i in range(0, row_block_num + 1):
        for j in range(0, col_block_num + 1):
            left_x = j * block_size
            right_x = min((j + 1) * block_size, om_img_save_img.shape[1])
            top_y = i * block_size
            down_y = min((i + 1) * block_size, om_img_save_img.shape[0])
            if left_x == right_x or top_y == down_y:
                continue
            tmp_om_img = om_img_save_img[top_y:down_y, left_x:right_x]
            tmp_om_img_name = om_img_name_list[0] + "_tr" + str(i + 1) + "-tc" + str(j + 1) + "." + om_img_name_list[1]
            tmp_om_img_path = os.path.join(tmp_om_img_dir, tmp_om_img_name)
            cv2.imwrite(tmp_om_img_path, tmp_om_img)


def crop_optics_img_sec2(em_img_path, om_img_path, em_resolution, em_zip_scale, om_resolution, om_img_center,
                         om_img_save_dir):
    """Crop the optical-microscope (OM) image into blocks matching an EM section image.

    em_img_path is a whole-section image stitched from EM tiles (already shrunk by
    em_zip_scale). Using the section center and the physical resolutions, each
    em_block_size x em_block_size EM block is mapped onto the corresponding region
    of the OM image (om_img_center was located manually in the OM image). Trailing
    EM rows/columns that are not a full em_block_size multiple are dropped. Each
    saved OM crop is centered on the mapped block center and covers
    om_extend_scale (1.5x) of the nominal block extent; regions outside the OM
    image are zero-padded.

    Assumed from usage -- confirm with callers:
        em_resolution / om_resolution are nm-per-pixel, om_img_center is (x, y),
        and om_img_path names end in "-<idx>.<ext>" (e.g. Image-1-001.png).
    """
    om_img_save_dir = file_utils.create_dir(om_img_save_dir)
    em_img = cv2.imread(em_img_path, 0)
    em_h, em_w = em_img.shape
    # Recover the full-resolution EM section size from the thumbnail size.
    em_real_h, em_real_w = round(em_h / em_zip_scale), round(em_w / em_zip_scale)

    em_block_size = 10240
    om_extend_scale = 1.5
    # int() truncation discards partial trailing rows/columns.
    row_block_num, col_block_num = int(em_real_h / em_block_size), int(em_real_w / em_block_size)

    # Create the per-image output sub-directory for the OM crops.
    om_img_name_list = os.path.basename(om_img_path).rsplit(".", 1)
    tmp_om_img_idx_str = om_img_name_list[0].rsplit("-", 1)[1]
    tmp_om_img_dir = os.path.join(om_img_save_dir, tmp_om_img_idx_str)
    tmp_om_img_dir = file_utils.create_dir(tmp_om_img_dir)

    om_img = cv2.imread(om_img_path, 0)

    for i in range(0, row_block_num):
        for j in range(0, col_block_num):
            # Offset of this EM block's center from the EM section center (EM pixels).
            cur_em_block_center_x = int(j * em_block_size + em_block_size / 2)
            cur_em_block_center_y = int(i * em_block_size + em_block_size / 2)
            cur_em_dis_x = cur_em_block_center_x - em_real_w / 2
            cur_em_dis_y = cur_em_block_center_y - em_real_h / 2

            # Convert the offset into OM pixels and anchor it at the OM center.
            cur_om_dis_x = cur_em_dis_x * em_resolution / om_resolution
            cur_om_dis_y = cur_em_dis_y * em_resolution / om_resolution
            cur_om_block_center_x = int(cur_om_dis_x + om_img_center[0])
            cur_om_block_center_y = int(cur_om_dis_y + om_img_center[1])

            om_block_size = int(em_block_size * em_resolution / om_resolution)
            # The crop covers om_extend_scale (1.5x) of the nominal block extent.
            cur_om_block_left_x = int(cur_om_block_center_x - om_block_size * om_extend_scale / 2)
            cur_om_block_right_x = int(cur_om_block_left_x + om_block_size * om_extend_scale)
            cur_om_block_top_y = int(cur_om_block_center_y - om_block_size * om_extend_scale / 2)
            cur_om_block_down_y = int(cur_om_block_top_y + om_block_size * om_extend_scale)

            if cur_om_block_left_x >= 0 and cur_om_block_right_x <= om_img.shape[
                1] and cur_om_block_top_y >= 0 and cur_om_block_down_y <= om_img.shape[0]:
                # Crop window fully inside the OM image: slice directly.
                tmp_om_img = om_img[cur_om_block_top_y:cur_om_block_down_y, cur_om_block_left_x:cur_om_block_right_x]
            else:
                # Window crosses the border: embed om_img in a zero-padded canvas
                # first, shift the window by the pad offset, then slice.
                big_om_img = np.zeros((om_img.shape[0] + om_block_size * 6, om_img.shape[1] + om_block_size * 6),
                                      np.uint8)
                big_om_img[om_block_size * 3:om_block_size * 3 + om_img.shape[0],
                om_block_size * 3:om_block_size * 3 + om_img.shape[1]] = om_img
                cur_om_block_left_x += om_block_size * 3
                cur_om_block_right_x += om_block_size * 3
                cur_om_block_top_y += om_block_size * 3
                cur_om_block_down_y += om_block_size * 3
                tmp_om_img = big_om_img[cur_om_block_top_y:cur_om_block_down_y,
                             cur_om_block_left_x:cur_om_block_right_x]

            # Save as <name>_tr<row>-tc<col>.<ext> with 1-based row/col indices.
            tmp_om_img_name = om_img_name_list[0] + "_tr" + str(i + 1) + "-tc" + str(j + 1) + "." + om_img_name_list[1]
            tmp_om_img_path = os.path.join(tmp_om_img_dir, tmp_om_img_name)
            cv2.imwrite(tmp_om_img_path, tmp_om_img)


def loop_crop_optics_img_sec2(em_img_dir, om_img_dir, em_resolution, em_zip_scale, om_resolution, om_img_center,
                              om_img_save_dir):
    """Crop every registered optical image into blocks matching its EM thumbnail.

    em_img_dir holds registered EM thumbnails (names like S1M1-42.png) and
    om_img_dir the registered optical images (names like Image-1-001.png).
    em_resolution is the physical resolution of the rendered EM images
    (typically 8 nm), em_zip_scale the shrink factor of the EM thumbnails,
    om_resolution the optical resolution (currently 345 nm). om_img_center is
    the manually located optical center matching the EM thumbnail center
    (assumed shared by all sections when registration is good), and
    om_img_save_dir is where the cropped optical blocks are written.
    """
    em_img_dir = file_utils.get_abs_dir(em_img_dir)
    om_img_dir = file_utils.get_abs_dir(om_img_dir)
    file_utils.create_dir(om_img_save_dir)

    # Collect EM thumbnails keyed by section index (file names like S1M1-42.png).
    em_dict = {}
    for fname in os.listdir(em_img_dir):
        if fname.rsplit(".", 1)[1] != "png" or "S" not in fname or "M" not in fname:
            continue
        cur_sec = int(fname.split("S")[1].split("M")[0])
        if cur_sec in em_dict.keys():
            raise Exception("Error! Repetitive sec_idx {}.".format(cur_sec))
        em_dict[cur_sec] = os.path.join(em_img_dir, fname)
    em_dict = dict(sorted(em_dict.items(), key=lambda kv: kv[0]))

    # Collect optical images keyed by section index (file names like Image-1-001.png).
    om_dict = {}
    for fname in os.listdir(om_img_dir):
        if fname.rsplit(".", 1)[1] != "png":
            continue
        cur_sec = int(fname.rsplit(".", 1)[0].split("-")[-1])
        if cur_sec in om_dict.keys():
            raise Exception("Error! Repetitive sec_idx {}.".format(cur_sec))
        om_dict[cur_sec] = os.path.join(om_img_dir, fname)
    om_dict = dict(sorted(om_dict.items(), key=lambda kv: kv[0]))

    if len(em_dict) == 0 or len(om_dict) == 0:
        print("{} or {} is Null.".format(em_img_dir, om_img_dir))

    # Crop section by section; skip sections with no optical counterpart.
    for cur_sec, em_path in em_dict.items():
        if cur_sec not in om_dict.keys():
            print("{} not in om_dict.keys().".format(cur_sec))
            continue
        crop_optics_img_sec2(em_path, om_dict[cur_sec], em_resolution, em_zip_scale, om_resolution, om_img_center,
                             om_img_save_dir)


def manual_check_electronic_optical_imgs(electronic_img_dir, optical_img_dir):
    """Build side-by-side review images of EM blocks and their optical counterparts.

    For each EM block image (named like sample1_S2M1_tr1-tc1.png) the matching
    optical image (Image-1-S2M1.png) is located, both are resized, pasted onto
    one white canvas, and the combined image is written to the sibling "test2"
    directory. The original interactive cv2.imshow/waitKey review flow is kept
    below as commented-out code.
    """
    electronic_img_dir = file_utils.get_abs_dir(electronic_img_dir)
    optical_img_dir = file_utils.get_abs_dir(optical_img_dir)
    files = os.listdir(electronic_img_dir)
    files = sorted(files)
    # optical_img_names = os.listdir(optical_img_dir)

    # Derive the optical image name from the EM image name.
    # EM name, e.g.:      sample1_S2M1_tr1-tc1.png
    # Optical name, e.g.: Image-1-S2M1.png
    # cv2.namedWindow("电镜 VS 光镜", cv2.WINDOW_NORMAL)
    for file in files:
        # ======== skip outputs that already exist (some server-to-local transfers failed) =======#
        if os.path.exists(os.path.join(os.path.dirname(electronic_img_dir), "test2", file)):
            continue
        # ===============#
        abs_file_path = os.path.join(electronic_img_dir, file)
        if not os.path.isfile(abs_file_path):
            continue
        if file.split(".")[-1].lower() not in ["jpg", "jpeg", "png", "bmp"]:
            continue

        img_name_prefix, img_name_suffix = file.rsplit(".", 1)
        tmp_list = img_name_prefix.split("_")
        if len(tmp_list) < 3 or "sample" not in tmp_list[0] or "S" not in tmp_list[1] or "M" not in tmp_list[1]:
            raise Exception("Error! Nonstandard name {}, it should be like sample1_S2M1_tr1-tc1.png".format(file))

        sample_idx = int(tmp_list[0].split("sample")[-1])
        sec_mFov_info = tmp_list[1]

        corres_optical_img_name = "Image-" + str(sample_idx) + "-" + sec_mFov_info + ".png"
        abs_optical_img_path = os.path.join(optical_img_dir, corres_optical_img_name)
        if not os.path.exists(abs_optical_img_path):
            print("Error! Current electronic img is {}, but cannot find corresponding optical img {}".format(file,
                                                                                                             corres_optical_img_name))
            continue

        # Read both images; the EM image is large, so shrink it to 1/4.
        electronic_img = cv2.imread(abs_file_path, 0)
        electronic_img = cv2.resize(electronic_img, (0, 0), fx=0.25, fy=0.25)

        # The optical crop is small, so enlarge it 5x for visual comparison.
        optical_img = cv2.imread(abs_optical_img_path, 0)
        optical_img = cv2.resize(optical_img, (0, 0), fx=5, fy=5)

        # Paste both onto one white canvas, vertically centering the shorter
        # image, with a 20-pixel gap between them.
        total_img_h = max(electronic_img.shape[0], optical_img.shape[0])
        total_img_w = electronic_img.shape[1] + optical_img.shape[1] + 20
        total_img = np.ones((total_img_h, total_img_w), np.uint8) * 255
        if electronic_img.shape[0] >= optical_img.shape[0]:
            total_img[:, :electronic_img.shape[1]] = electronic_img
            optical_img_put_y = int(electronic_img.shape[0] / 2 - optical_img.shape[0] / 2)
            total_img[optical_img_put_y:optical_img_put_y + optical_img.shape[0],
            total_img_w - optical_img.shape[1]:total_img_w] = optical_img
        else:
            electronic_img_put_y = int(optical_img.shape[0] / 2 - electronic_img.shape[0] / 2)
            total_img[electronic_img_put_y:electronic_img_put_y + electronic_img.shape[0],
            :electronic_img.shape[1]] = electronic_img
            total_img[:, total_img_w - optical_img.shape[1]:total_img_w] = optical_img
        # cv2.imshow("电镜 VS 光镜", total_img)
        print("Current electronic img is: {}, optical img is: {}".format(file, corres_optical_img_name))
        # NOTE(review): cv2.imwrite fails silently if the "test2" directory does not exist -- verify it is created beforehand.
        cv2.imwrite(os.path.join(os.path.dirname(electronic_img_dir), "test2", file), total_img)
    #     key = cv2.waitKey()
    #     if key == 119: # "w" key: this pair is unqualified/mismatched, move both out and log it
    #         print("Error! Current electronic img {} and optical img {} are unqualified or not matched.".format(file, corres_optical_img_name))
    #         shutil.move(abs_file_path, "/media/hqjin/Elements/OEunion_data/unqualified_data/em_data")
    #         shutil.move(abs_optical_img_path, "/media/hqjin/Elements/OEunion_data/unqualified_data/om_data")
    #     elif key == 27: # "Esc" key: quit the review loop
    #         break
    #     else: # any other key: advance to the next pair
    #         pass

    # cv2.destroyAllWindows()


def get_all_block_img_name_row_col_range(mdir):
    """Scan a directory of block images and return their names and row/col range.

    The directory must contain only one big image or block images, each named
    after the S*M*_tr*-tc*.png convention (section info, mFov info, 1-based row
    index, 1-based column index). Returns the sorted list of absolute image
    paths and a dict {"min_row", "max_row", "min_col", "max_col"} describing
    the index range found.

    Raises:
        Exception: if an entry is not a regular file, or its name does not
            follow the naming convention (the original parse error is chained).
    """
    mdir = file_utils.get_abs_dir(mdir)

    all_img_names = []
    # Sentinels: min trackers start at int32 max, max trackers at 0; an empty
    # directory therefore returns these sentinels with an empty name list.
    min_row, min_col = np.iinfo(np.int32).max, np.iinfo(np.int32).max
    max_row, max_col = 0, 0
    for file_name in os.listdir(mdir):  # e.g. S2M1-10_tr1-tc2.png: section 2, mFovs 1-10, row 1, col 2
        abs_file_name = os.path.join(mdir, file_name)
        if not os.path.isfile(abs_file_name):
            raise Exception("Error! {} is not a file!".format(abs_file_name))

        if "S" not in file_name or "M" not in file_name or "tr" not in file_name or "tc" not in file_name:
            raise Exception(
                "Error! {} may be not a block img! Block img file name should be like: S*M*_tr*-tc*.png".format(
                    abs_file_name))

        tmp_split_list = file_name.split("tr")
        try:
            row_idx = int(tmp_split_list[1].split("-")[0])
            col_idx = int(tmp_split_list[1].split("tc")[1].split(".")[0])
        except Exception as err:
            # Chain the original parsing error so the root cause stays visible.
            raise Exception(
                "Error! Unknown block img file -> {}. Block img file name should be like: S*M*_tr1-tc2.png".format(
                    abs_file_name)) from err

        all_img_names.append(abs_file_name)

        # Track the row/column index range with the builtins instead of four ifs.
        min_row = min(min_row, row_idx)
        min_col = min(min_col, col_idx)
        max_row = max(max_row, row_idx)
        max_col = max(max_col, col_idx)

    all_img_names = sorted(all_img_names)
    row_col_dict = {"min_row": min_row, "max_row": max_row, "min_col": min_col, "max_col": max_col}
    return all_img_names, row_col_dict


def check_block_imgs_integral(all_img_names, row_col_dict):
    """Verify that the collected block images form a complete row/column grid.

    The expected count is rows x cols derived from row_col_dict; an Exception
    is raised when len(all_img_names) does not match it.
    """
    rows = row_col_dict["max_row"] - row_col_dict["min_row"] + 1
    cols = row_col_dict["max_col"] - row_col_dict["min_col"] + 1
    theoretical_num, actual_num = rows * cols, len(all_img_names)
    if theoretical_num != actual_num:
        raise Exception(
            "Error!! Theoretical block images num is {}, but actual block images num is {}.".format(theoretical_num,
                                                                                                    actual_num))


def merge_block_imgs(all_img_names, row_col_dict, scale, merge_img_path):
    """Stitch the block images back into one big image at `scale` and save it.

    Block file paths are rebuilt from all_img_names[0]'s prefix plus the
    row/column range in row_col_dict; each block is resized by `scale` before
    being pasted onto the canvas.
    """
    file_utils.create_save_dir_from_file(merge_img_path)

    img_prefix = all_img_names[0].split("_tr")[0]
    img_type = all_img_names[0].rsplit(".", 1)[1]

    def _block_path(r, c):
        # Rebuild a block's file path from its row/column indices.
        return img_prefix + "_tr" + str(r) + "-tc" + str(c) + "." + img_type

    # Shape of the top-left block (interior blocks share it).
    first_h, first_w = cv2.imread(_block_path(row_col_dict["min_row"], row_col_dict["min_col"]), 0).shape
    # Shape of the bottom-right block (may be smaller than the interior ones).
    last_h, last_w = cv2.imread(_block_path(row_col_dict["max_row"], row_col_dict["max_col"]), 0).shape

    # Size the canvas from per-block rounded sizes so that resize rounding does
    # not accumulate: (n-1) full blocks plus the (possibly smaller) last block.
    merge_img_h = (row_col_dict["max_row"] - row_col_dict["min_row"]) * round(first_h * scale) + round(last_h * scale)
    merge_img_w = (row_col_dict["max_col"] - row_col_dict["min_col"]) * round(first_w * scale) + round(last_w * scale)
    merge_img = np.zeros((merge_img_h, merge_img_w), np.uint8)

    # Paste the blocks row by row, left to right.
    pos_y = 0
    for r in range(row_col_dict["min_row"], row_col_dict["max_row"] + 1):
        pos_x = 0
        for c in range(row_col_dict["min_col"], row_col_dict["max_col"] + 1):
            cur_path = _block_path(r, c)
            if not os.path.exists(cur_path):
                raise Exception("Error! {} is not exist!".format(cur_path))

            block = cv2.imread(cur_path, 0)
            block = cv2.resize(block, (0, 0), fx=scale, fy=scale)
            merge_img[pos_y:pos_y + block.shape[0], pos_x:pos_x + block.shape[1]] = block
            pos_x += block.shape[1]
        # Advance by the height of the last block pasted in this row.
        pos_y += block.shape[0]
    cv2.imwrite(merge_img_path, merge_img)


def loop_merge_block_imgs(block_imgs_dir, merge_imgs_dir, scale=0.05, process_num=1):
    """Merge every per-section block-image sub-directory into one image, in parallel.

    Each sub-directory of block_imgs_dir (e.g. "001") is validated for grid
    completeness and then stitched by merge_block_imgs() in a worker pool of
    process_num processes; results land in merge_imgs_dir.
    """
    block_imgs_dir = file_utils.get_abs_dir(block_imgs_dir)
    merge_imgs_dir = file_utils.create_dir(merge_imgs_dir)

    pool = Pool(processes=process_num)
    async_results = []
    for sub_dir in os.listdir(block_imgs_dir):  # sub_dir like: 001
        abs_sub_dir = os.path.join(block_imgs_dir, sub_dir)
        if not os.path.isdir(abs_sub_dir):
            continue

        all_img_names, row_col_dict = get_all_block_img_name_row_col_range(abs_sub_dir)
        if not all_img_names:
            continue

        check_block_imgs_integral(all_img_names, row_col_dict)
        base_name = os.path.basename(all_img_names[0])
        merge_img_name = base_name.split("_tr")[0] + "." + base_name.rsplit(".", 1)[1]
        merge_img_name = os.path.join(merge_imgs_dir, merge_img_name)
        async_results.append(pool.apply_async(merge_block_imgs, (all_img_names, row_col_dict, scale, merge_img_name)))

    pool.close()
    pool.join()

    # Fetch every result so any worker exception is re-raised here.
    for ar in async_results:
        ar.get()
    print("Done!")


def _invert_coord_file(src_txt_path, dst_txt_path):
    """Mirror every (x, y) start coordinate of a tab-separated coordinate file.

    Each input line is "img_path<TAB>start_x<TAB>start_y<TAB>label". Every
    coordinate is reflected as x' = x_max - x + x_min (same for y) so the
    layout matches the centrally-symmetric tile images. Lines are written with
    "\r\n" endings because the downstream parser expects the Windows format.
    """
    img_paths, img_start_x, img_start_y, img_label = [], [], [], []
    with open(src_txt_path, "r") as f:
        for line in f.readlines():
            line_list = line.strip().split("\t")
            img_paths.append(line_list[0])
            img_start_x.append(float(line_list[1]))
            img_start_y.append(float(line_list[2]))
            img_label.append(line_list[3])

    x_min, x_max = np.array(img_start_x).min(), np.array(img_start_x).max()
    y_min, y_max = np.array(img_start_y).min(), np.array(img_start_y).max()

    with open(dst_txt_path, "w") as f:
        for i in range(len(img_paths)):
            cur_start_x = x_max - img_start_x[i] + x_min
            cur_start_y = y_max - img_start_y[i] + y_min
            f.write(img_paths[i] + "\t" + str(cur_start_x) + "\t" + str(cur_start_y) + "\t" + img_label[i] + "\r\n")


def convert_src_section_imgs(src_dir, dst_dir, process_num=20):
    """Convert a raw EM section whose tiles appear centrally symmetric to others.

    Every .bmp tile under the numeric sub-directories of src_dir is flipped by
    central symmetry into dst_dir (in parallel), and both
    full_image_coordinates.txt and full_thumbnail_coordinates.txt are rewritten
    with mirrored coordinates so they stay consistent with the flipped tiles.
    The two coordinate files share one code path (_invert_coord_file); they
    were previously handled by duplicated inline code.

    process_num: number of worker processes (default 20, the historical value).
    """
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    src_coord_txt_path = os.path.join(src_dir, "full_image_coordinates.txt")
    src_coord_txt_path = file_utils.get_abs_file_path(src_coord_txt_path)
    dst_coord_txt_path = os.path.join(dst_dir, "full_image_coordinates.txt")

    src_thumb_coord_txt_path = os.path.join(src_dir, "full_thumbnail_coordinates.txt")
    src_thumb_coord_txt_path = file_utils.get_abs_file_path(src_thumb_coord_txt_path)
    dst_thumb_coord_txt_path = os.path.join(dst_dir, "full_thumbnail_coordinates.txt")

    # Collect every tile image from the numeric sub-directories (e.g. "001").
    src_img_paths, dst_img_paths = [], []
    for sub_dir in os.listdir(src_dir):
        abs_sub_dir = os.path.join(src_dir, sub_dir)
        if not os.path.isdir(abs_sub_dir) or not sub_dir.isdigit():
            continue

        abs_dst_sub_dir = file_utils.create_dir(os.path.join(dst_dir, sub_dir))
        for file_name in os.listdir(abs_sub_dir):
            if file_name.rsplit(".", 1)[1] != "bmp":
                continue
            src_img_paths.append(os.path.join(abs_sub_dir, file_name))
            dst_img_paths.append(os.path.join(abs_dst_sub_dir, file_name))

    # Split the files into process_num batches and flip the tiles in parallel.
    batch_num = max(math.ceil(len(src_img_paths) / process_num), 1)
    src_group = [src_img_paths[i: i + batch_num] for i in range(0, len(src_img_paths), batch_num)]
    dst_group = [dst_img_paths[i: i + batch_num] for i in range(0, len(dst_img_paths), batch_num)]

    pool = Pool(processes=process_num)
    res_l = [pool.apply_async(img_utils.get_central_symmetry_imgs, (src_group[i], dst_group[i]))
             for i in range(len(src_group))]
    pool.close()
    pool.join()
    for res in res_l:
        res.get()  # surface any worker exception

    # Mirror the full-resolution and the thumbnail coordinate files.
    _invert_coord_file(src_coord_txt_path, dst_coord_txt_path)
    _invert_coord_file(src_thumb_coord_txt_path, dst_thumb_coord_txt_path)


def change_thumbbail_img_name(src_dir, dst_dir):
    """Copy thumbnail images into dst_dir under a normalized naming scheme.

    Irregular names such as 001_S3R1 appearing between 001_S1R1 and 002_S2R1
    break the viewing order in image viewers, so each file is copied as
    "<sec_idx zero-padded to 5>_S<sec_idx>R1.<ext>", e.g. 00001_S1R1.png.
    """
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)
    for fname in os.listdir(src_dir):  # fname like: 001_S1R1.png
        src_path = os.path.join(src_dir, fname)
        if not os.path.isfile(src_path):
            continue

        # Skip anything that does not look like a section thumbnail name.
        if "S" not in fname or "R" not in fname:
            continue

        sec_idx = int(fname.split("S")[1].split("R")[0])
        new_name = "{}_S{}R1.{}".format(str(sec_idx).zfill(5), sec_idx, fname.rsplit(".")[-1])
        shutil.copy(src_path, os.path.join(dst_dir, new_name))


def change_json(src_json, dst_json, rotate_angle):
    """Append an extra rigid rotation to every tile of a stitching json.

    When two consecutive sections differ too much in rotation, align pre_match
    can fail to find a global model; the workaround is to rotate the later
    section artificially. This appends one RigidModel2D (rotate_angle, zero
    translation) to each tile's transform chain and recomputes the tile bbox
    by pushing the four image corners through the whole chain.
    """
    src_json = file_utils.get_abs_file_path(src_json)
    dst_dir = file_utils.create_dir(os.path.dirname(dst_json))
    with open(src_json, 'rt') as in_f:
        tilespec = ujson.load(in_f)

    # All tiles share one shape; reading the first image is enough.
    img_path = tilespec[0]["mipmapLevels"]["0"]["imageUrl"].replace("file://", "")
    height, width = cv2.imread(img_path, 0).shape

    for tile in tilespec:
        # Extra rigid model: pure rotation, zero translation.
        tile["transforms"].append({"className": "mpicbg.trakem2.transform.RigidModel2D",
                                   "dataString": str(rotate_angle) + " 0 0"})

        # Four corners of the untransformed image.
        corners = np.array([[0, 0], [width, 0], [width, height], [0, height]])

        # Push the corners through every transform in order (including the
        # freshly appended one); each dataString is "angle tx ty".
        for t in tile["transforms"]:
            parts = t["dataString"].split(" ")
            angle = float(parts[0])
            delta = [float(parts[1]), float(parts[2])]

            cos_val = np.cos(angle)
            sin_val = np.sin(angle)
            corners = np.dot([[cos_val, -sin_val], [sin_val, cos_val]], corners.T).T + np.asarray(delta).reshape((1, 2))

        # bbox layout: [x_min, x_max, y_min, y_max].
        xy_min = np.min(corners, axis=0)
        xy_max = np.max(corners, axis=0)
        tile["bbox"] = [xy_min[0], xy_max[0], xy_min[1], xy_max[1]]

    with open(dst_json, 'wt') as f:
        json.dump(tilespec, f, sort_keys=True, indent=4)


def change_json2(src_json, dst_json, rotate_angle):
    """Rotate a stitching json by folding rotate_angle into each tile's rigid model.

    Like change_json(), this compensates for consecutive sections whose relative
    rotation is too large for align pre_match to find a global model. Instead of
    appending a second RigidModel2D, the extra rotation is composed analytically
    with each tile's existing rigid model (transforms[0], dataString
    "angle tx ty"), each bbox is recomputed from the transformed image corners,
    and finally the whole section is shifted so its minimum x/y lands at (0, 0).
    """
    src_json = file_utils.get_abs_file_path(src_json)
    dst_dir = file_utils.create_dir(os.path.dirname(dst_json))
    with open(src_json, 'rt') as in_f:
        tilespec = ujson.load(in_f)

    # All tiles share the same shape; reading only the first image is enough.
    img_path = tilespec[0]["mipmapLevels"]["0"]["imageUrl"].replace("file://", "")
    img = cv2.imread(img_path, 0)
    height, width = img.shape

    for i in range(len(tilespec)):
        # Parse this tile's rigid model ("angle tx ty").
        rigid_model_data = tilespec[i]["transforms"][0]["dataString"]
        old_angle, old_delta1, old_delta2 = float(rigid_model_data.split(" ")[0]), float(
            rigid_model_data.split(" ")[1]), float(rigid_model_data.split(" ")[2])
        old_cos_val = np.cos(old_angle)
        old_sin_val = np.sin(old_angle)

        add_cos_val = np.cos(rotate_angle)
        add_sin_val = np.sin(rotate_angle)

        # Compose R(rotate_angle) @ R(old_angle) via the angle-sum identities.
        new_cos_val = add_cos_val * old_cos_val - add_sin_val * old_sin_val
        new_sin_val = add_sin_val * old_cos_val + add_cos_val * old_sin_val
        new_angle = np.arctan2(new_sin_val, new_cos_val)

        # The old translation is rotated by the added rotation as well.
        new_delta1 = add_cos_val * old_delta1 - add_sin_val * old_delta2
        new_delta2 = add_sin_val * old_delta1 + add_cos_val * old_delta2

        new_rigid_model_data = str(new_angle) + " " + str(new_delta1) + " " + str(new_delta2)
        # Write the composed rigid model back.
        tilespec[i]["transforms"][0]["dataString"] = new_rigid_model_data

        # Four corners of the untransformed image.
        corners = np.array([[0., 0.], [width, 0.], [width, height], [0., height]])

        # Push the corners through the composed rigid model.
        new_corners = np.dot([[new_cos_val, -new_sin_val], [new_sin_val, new_cos_val]], corners.T).T + np.asarray(
            [new_delta1, new_delta2]).reshape((1, 2))

        # bbox layout: [x_min, x_max, y_min, y_max].
        xy_min = np.min(new_corners, axis=0)
        xy_max = np.max(new_corners, axis=0)
        new_bbox = [xy_min[0], xy_max[0], xy_min[1], xy_max[1]]
        tilespec[i]["bbox"] = new_bbox

    # After the rotation the section's (x_min, y_min) is generally not (0, 0),
    # so translate every tile back to the origin.
    all_bboxes = []
    for i in range(len(tilespec)):
        all_bboxes.append(tilespec[i]["bbox"])

    all_bboxes = np.array(all_bboxes)
    min_x = np.min(all_bboxes[:, 0])
    min_y = np.min(all_bboxes[:, 2])

    for i in range(len(tilespec)):
        # Parse the (already composed) rigid model again.
        rigid_model_data = tilespec[i]["transforms"][0]["dataString"]
        angle, delta1, delta2 = float(rigid_model_data.split(" ")[0]), float(rigid_model_data.split(" ")[1]), float(
            rigid_model_data.split(" ")[2])
        cos_val = np.cos(angle)
        sin_val = np.sin(angle)

        # Shift the translation so the section's min corner lands on (0, 0).
        delta1 -= min_x
        delta2 -= min_y

        new_rigid_model_data = str(angle) + " " + str(delta1) + " " + str(delta2)
        # Write the shifted rigid model back.
        tilespec[i]["transforms"][0]["dataString"] = new_rigid_model_data

        # Four corners of the untransformed image.
        corners = np.array([[0., 0.], [width, 0.], [width, height], [0., height]])

        # Push the corners through the shifted rigid model.
        new_corners = np.dot([[cos_val, -sin_val], [sin_val, cos_val]], corners.T).T + np.asarray(
            [delta1, delta2]).reshape((1, 2))

        # Recompute the bbox as [x_min, x_max, y_min, y_max].
        xy_min = np.min(new_corners, axis=0)
        xy_max = np.max(new_corners, axis=0)
        new_bbox = [xy_min[0], xy_max[0], xy_min[1], xy_max[1]]
        tilespec[i]["bbox"] = new_bbox

    with open(dst_json, 'wt') as f:
        json.dump(tilespec, f, sort_keys=True, indent=4)


def gen_section_thumbnail(src_dir, dst_dir, scale=1.0):
    """Stitch one section's tile thumbnails into a single overview image.

    Reads full_thumbnail_coordinates.txt inside src_dir (a folder named like
    abc/001_S1R1), pastes every tile thumbnail at its (scaled) coordinate,
    draws a rectangle + index label around each mFov, and saves the result to
    dst_dir/S<sec>.png. `scale` shrinks both the coordinates and the tiles.
    """
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    base_dir = os.path.basename(src_dir)
    if "S" not in base_dir or "R" not in base_dir or not base_dir.split("_")[0].isdigit():  # expect e.g. 001_S1R1
        print("Error src_dir name -> {}. It should be like abc/001_S1R1.".format(src_dir))
        return

    sec_idx = int(base_dir.split("S")[1].split("R")[0])

    for file in os.listdir(src_dir):
        abs_file_path = os.path.join(src_dir, file)
        if file != "full_thumbnail_coordinates.txt":  # the thumbnail coords give the fastest full-section mosaic
            continue

        with open(abs_file_path, "r") as f:
            lines = f.readlines()

        # Parse path, x start, y start from each 4-column line.
        img_paths, x_locs, y_locs = [], [], []
        for line in lines:
            str_list = line.split("\t")
            if len(str_list) == 4:
                img_paths.append(str_list[0].replace("\\", "/"))
                x_locs.append(float(str_list[1]))
                y_locs.append(float(str_list[2]))

        x_locs = np.array(x_locs)
        y_locs = np.array(y_locs)
        # Shift the origin to (0, 0).
        x_locs -= np.min(x_locs)
        y_locs -= np.min(y_locs)

        # BUGFIX: was astype(np.uint16), which silently wraps around once a
        # scaled coordinate exceeds 65535 on large sections; int64 is safe.
        x_locs = (x_locs * scale).astype(np.int64)
        y_locs = (y_locs * scale).astype(np.int64)

        # All tiles share the same size, so reading the first one is enough.
        img_h, img_w = cv2.imread(os.path.join(src_dir, img_paths[0]), 0).shape
        new_img_h, new_img_w = int(img_h * scale), int(img_w * scale)

        total_img = np.ones((np.max(y_locs) + new_img_h, np.max(x_locs) + new_img_w), np.uint8) * 255
        mFov_xy_dict = {}  # mFov index -> list of pasted [x, y] positions
        for img_path, x, y in zip(img_paths, x_locs, y_locs):
            img = cv2.imread(os.path.join(src_dir, img_path), 0)
            img = cv2.resize(img, (new_img_w, new_img_h))
            # Local contrast enhancement, then invert (raw EM data is inverted).
            img = ((skimage.exposure.equalize_adapthist(img, kernel_size=(8, 8),
                                                        clip_limit=0.015)) * 255).astype(np.uint8)
            img = 255 - img
            total_img[y:y + new_img_h, x:x + new_img_w] = img

            # The tile path starts with its mFov directory, e.g. "001/...".
            mFov_idx = int(os.path.dirname(img_path))
            mFov_xy_dict.setdefault(mFov_idx, []).append([x, y])

        # Box and label each mFov on the stitched overview.
        for mFov_idx, cood in mFov_xy_dict.items():
            cood = np.array(cood)
            xy_min = np.min(cood, 0)
            xy_max = np.max(cood, 0)
            bbox = [int(xy_min[0]), int(xy_max[0]) + new_img_w, int(xy_min[1]), int(xy_max[1]) + new_img_h]
            cv2.rectangle(total_img, (bbox[0], bbox[2]), (bbox[1], bbox[3]), 0, 5)
            cv2.putText(total_img, str(mFov_idx), (int((bbox[0] + bbox[1]) / 2), int((bbox[2] + bbox[3]) / 2)),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, 0, 2)
        cv2.imwrite(os.path.join(dst_dir, "S" + str(sec_idx) + ".png"), total_img)


def loop_gen_section_thumbnail(src_dir, dst_dir, scale=1.0):
    """Generate a section thumbnail for every 001_S1R1-style sub-directory, in parallel."""
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    pool = Pool(processes=os.cpu_count())
    async_results = []

    for sub_dir in os.listdir(src_dir):
        abs_sub_dir = os.path.join(src_dir, sub_dir)
        if not os.path.isdir(abs_sub_dir):
            continue

        # Sub-folders must look like 001_S1R1; skip everything else.
        if "S" not in sub_dir or "R" not in sub_dir or not sub_dir.split("_")[0].isdigit():
            continue

        async_results.append(pool.apply_async(gen_section_thumbnail, (abs_sub_dir, dst_dir, scale)))

    pool.close()
    pool.join()

    # Fetch every result so worker exceptions propagate.
    for ar in async_results:
        ar.get()


def manual_rotate_imgs(src_dir, dst_dir):
    """Interactively rotate every image in src_dir and record the chosen angles.

    For each *S*R*.{jpg,jpeg,png} file the image is shown and the user types an
    integer rotation angle (multiples of 15 recommended; positive is
    counter-clockwise, negative clockwise; pressing enter keeps the current
    angle). The rotated images are saved to dst_dir and the angles -- plus the
    equivalent radian value used by change_json() -- go to dst_dir/angle.xlsx.
    """
    from pandas import DataFrame
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)
    file_list = []
    for f in os.listdir(src_dir):  # f like: 001_S1R1.png
        abs_f = os.path.join(src_dir, f)
        if not os.path.isfile(abs_f):
            continue

        if "S" not in f or "R" not in f or f.rsplit(".", 1)[1] not in ["jpg", "jpeg", "png"]:  # not a file we want
            continue

        file_list.append(abs_f)

    file_list = sorted(file_list)

    # Show each image and collect the manually entered rotation angle.
    base_names, final_angles = [], []
    for file in file_list:
        src_img = cv2.imread(file, 0)
        img_name = os.path.basename(file)
        base_names.append(img_name)
        cv2.namedWindow(img_name, cv2.WINDOW_KEEPRATIO)
        cv2.imshow(img_name, src_img)
        cv2.waitKey(1000)
        final_angle = 0
        while True:  # the typed angle may be wrong, so loop until "enter" accepts it
            angle = input("请输入{}的旋转角度： ".format(os.path.basename(file)))
            if angle == "":  # plain "enter" accepts the current angle
                break

            try:
                angle = int(angle)
            except:
                print("旋转角度应该是个整数值!")
            else:
                # Preview the rotation so the user can refine the angle.
                final_angle = angle
                dst_img = img_utils.rotate_img(src_img, angle)
                cv2.imshow(img_name, dst_img)
                cv2.waitKey(1000)
        final_angles.append(final_angle)
        cv2.destroyAllWindows()

    assert (len(base_names) == len(final_angles))

    # Re-read, rotate and save each image with its final angle; doing it again
    # here double-checks that the collected angles are the intended ones.
    for file, angle in zip(file_list, final_angles):
        src_img = cv2.imread(file, 0)
        img_name = os.path.basename(file)
        dst_img = img_utils.rotate_img(src_img, angle)
        dst_path = os.path.join(dst_dir, img_name)
        cv2.imwrite(dst_path, dst_img)
        print("{} rotated {}".format(img_name, angle))

    # Radian value matching change_json(): angle2 = (-angle) / 180 * 3.1415926
    angle2 = []
    for angle in final_angles:
        tmp_angle = round((-angle) / 180 * 3.1415926, 8)
        angle2.append(tmp_angle)

    # Save image names, degree angles and change_json values to an excel sheet.
    data = {"name": base_names, "angle": final_angles, "change_json_value": angle2}
    df = DataFrame(data)
    df.to_excel(os.path.join(dst_dir, "angle.xlsx"))


def print_change_json_info(excel_file, src_json_dir, dst_json_dir):
    """Print the change_json2() calls needed after manual_rotate_imgs().

    manual_rotate_imgs() produces an excel sheet recording the rotation angle
    applied to every section image.  For each row this function locates the
    matching json in src_json_dir (by section index) and prints the
    change_json2(src, dst, angle) call instead of executing it -- the json
    files may have been downloaded from a server, so the image paths recorded
    inside them can be server paths rather than local ones.
    """
    import pandas as pd

    excel_file = file_utils.get_abs_file_path(excel_file)
    src_json_dir = file_utils.get_abs_dir(src_json_dir)
    dst_json_dir = file_utils.create_dir(dst_json_dir)
    # Excel columns 1, 2, 3 are headed "name", "angle", "change_json_value".
    df = pd.DataFrame(pd.read_excel(excel_file, usecols=[1, 2, 3]))
    dicts = df.to_dict('dict')
    assert len(dicts["name"]) == len(dicts["angle"]) == len(dicts["change_json_value"])

    # Collect the names of all json files in src_json_dir.
    src_jsons = []
    for f in os.listdir(src_json_dir):  # f like: S1M1-142.json
        abs_f = os.path.join(src_json_dir, f)
        # endswith() instead of rsplit(".", 1)[1]: the latter raises
        # IndexError for file names that contain no dot at all.
        if not os.path.isfile(abs_f) or not f.endswith(".json"):
            continue
        src_jsons.append(f)
    src_jsons = sorted(src_jsons)

    for i in range(len(dicts["name"])):
        img_name = dicts["name"][i]
        sec_idx = int(img_name.split("S")[1].split("R")[0])

        # Find the json whose section index matches this row.  The loop
        # variable must NOT be called "json": that would shadow the imported
        # json module for the rest of the function.
        found = False
        for json_name in src_jsons:
            tmp_idx = int(json_name.split("S")[1].split("M")[0])
            if tmp_idx == sec_idx:
                found = True
                src_json = os.path.join(src_json_dir, json_name)
                dst_json = os.path.join(dst_json_dir, json_name)
                angle = dicts["change_json_value"][i]
                print("change_json2(\"{}\", \"{}\", {})".format(src_json, dst_json, angle))
                src_jsons.remove(json_name)  # shrink the search space for later rows
                break

        if not found:  # report sections with no matching json
            print("Not found section {} json file.".format(sec_idx))


def copy_from_other_section(src_section_dir, dst_section_dir):
    """Duplicate one section's tile images into another section's directory.

    When registration fails for a section it gets dropped and its neighbours
    are registered against each other; the dropped slot is then filled with a
    copy of the previous section so the neuroglancer view stays continuous.
    The last path component of both directories must be the section index
    (e.g. 00001 / 00002); file names have their "S<idx>M" prefix rewritten
    to the destination index.
    """
    src_section_dir = file_utils.get_abs_dir(src_section_dir)
    dst_section_dir = file_utils.create_dir(dst_section_dir)
    src_idx = int(os.path.basename(src_section_dir))
    dst_idx = int(os.path.basename(dst_section_dir))

    src_tag = "S" + str(src_idx) + "M"
    dst_tag = "S" + str(dst_idx) + "M"

    # Collect (source, destination) paths for every matching tile image.
    src_files, dst_files = [], []
    for fname in os.listdir(src_section_dir):  # fname like: S1M1-2_tr1-tc1.png
        if src_tag not in fname:
            print("{} not contain {}".format(fname, src_tag))
            continue
        src_files.append(os.path.join(src_section_dir, fname))
        dst_files.append(os.path.join(dst_section_dir, fname).replace(src_tag, dst_tag))

    # Split the work into at most process_num batches and copy in parallel.
    process_num = 20
    batch_num = max(math.ceil(len(src_files) / process_num), 1)
    src_group = [src_files[i: i + batch_num] for i in range(0, len(src_files), batch_num)]
    dst_group = [dst_files[i: i + batch_num] for i in range(0, len(dst_files), batch_num)]

    pool = Pool(processes=process_num)
    pending = [pool.apply_async(file_utils.copy_files, (s, d))
               for s, d in zip(src_group, dst_group)]
    pool.close()
    pool.join()

    for task in pending:  # get() re-raises any exception from a worker
        task.get()
    print("Done!")


def symlink_from_other_section(src_section_dir, dst_section_dir):
    """Symlink one section's tile images into another section's directory.

    Same purpose as copy_from_other_section(), but creates symbolic links
    instead of copies.  The last path component of both directories must be
    the section index (e.g. 00001 / 00002); file names have their "S<idx>M"
    prefix rewritten to the destination index.
    """
    src_section_dir = file_utils.get_abs_dir(src_section_dir)
    dst_section_dir = file_utils.create_dir(dst_section_dir)
    src_idx = int(os.path.basename(src_section_dir))
    dst_idx = int(os.path.basename(dst_section_dir))

    src_tag = "S" + str(src_idx) + "M"
    dst_tag = "S" + str(dst_idx) + "M"
    for fname in os.listdir(src_section_dir):  # fname like: S1M1-2_tr1-tc1.png
        if src_tag not in fname:
            print("{} not contain {}".format(fname, src_tag))
            continue
        link_target = os.path.join(src_section_dir, fname)
        link_path = os.path.join(dst_section_dir, fname).replace(src_tag, dst_tag)
        os.symlink(link_target, link_path)


def change_stitch_json_sec_idx(json_path, dst_dir, add_sec_idx):
    """Shift the section index of one stitch json by add_sec_idx.

    Three places carry the section index and all must be updated:
    1. the json file name (S<idx>M...);
    2. every tile's "layer" field;
    3. the *_S<idx>R* directory component inside each "imageUrl".
    The rewritten tilespec is written to dst_dir under the new name.
    """
    json_path = file_utils.get_abs_file_path(json_path)
    dst_dir = file_utils.create_dir(dst_dir)

    json_name = os.path.basename(json_path)
    assert "S" in json_name and "M" in json_name
    old_sec_idx = int(json_name.split("S")[1].split("M")[0])
    # New file name: new S<idx> prefix + everything after the LAST "M".
    # NOTE(review): any prefix before the "S" in the original name is
    # dropped here -- confirm file names always start with "S".
    dst_json = "S{}M{}".format(old_sec_idx + add_sec_idx, json_name.rsplit("M", 1)[1])
    dst_json = os.path.join(dst_dir, dst_json)

    with open(json_path, 'rt') as in_f:
        tilespec = ujson.load(in_f)

    for i in range(len(tilespec)):
        assert tilespec[i]["layer"] == old_sec_idx
        tilespec[i]["layer"] += int(add_sec_idx)
        old_img_path = tilespec[i]["mipmapLevels"]["0"]["imageUrl"]
        # The image's grandparent directory is named like "00001_S1R1".
        replace_str = os.path.basename(os.path.dirname(os.path.dirname(old_img_path)))
        assert "_" in replace_str and "S" in replace_str and "R" in replace_str
        R_num = int(replace_str.rsplit("R", 1)[1])
        # Rebuild it as "<zero-padded new idx>_S<new idx>R<R>".
        new_str = "{}_S{}R{}".format(str(old_sec_idx + add_sec_idx).zfill(5), old_sec_idx + add_sec_idx, R_num)
        new_img_path = old_img_path.replace(replace_str, new_str)
        tilespec[i]["mipmapLevels"]["0"]["imageUrl"] = new_img_path

    with open(dst_json, 'wt') as f:
        json.dump(tilespec, f, sort_keys=True, indent=4)


def loop_change_stitch_json_sec_idx(src_dir, dst_dir, add_sec_idx):
    """Apply change_stitch_json_sec_idx() to every json under src_dir.

    Every json's section index is shifted by add_sec_idx and the result is
    written to dst_dir.
    """
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    for file in os.listdir(src_dir):
        # endswith() instead of rsplit(".", 1)[1] == "json": rsplit raises
        # IndexError for names that contain no dot at all.
        if file.endswith(".json"):
            change_stitch_json_sec_idx(os.path.join(src_dir, file), dst_dir, add_sec_idx)


def choose_mFov_from_json(src_json_dir, dst_json_dir, txt_path):
    """Filter stitch jsons down to the mFovs listed in a txt file.

    Each line of txt_path looks like "-e <sec_idx> -d <mfov>|<a>-<b> ...";
    "a-b" denotes an inclusive range of mFov indices.  For every valid line
    the section's json is loaded from src_json_dir, only tiles whose "mfov"
    is listed are kept, and the filtered tilespec is written to dst_json_dir
    under the same file name.
    """
    src_json_dir = file_utils.get_abs_dir(src_json_dir)
    dst_json_dir = file_utils.create_dir(dst_json_dir)

    # Map section index -> json path; names look like S<sec>M<...>.json.
    json_dict = {}
    for file in os.listdir(src_json_dir):
        # endswith() instead of rsplit(".", 1)[1] == "json": rsplit raises
        # IndexError for names that contain no dot at all.
        if file.endswith(".json") and "S" in file and "M" in file:
            sec_idx = int(file.split("S")[1].split("M")[0])
            if sec_idx in json_dict:
                raise Exception("Error! Repetitive sec_idx {}.".format(sec_idx))

            json_dict[sec_idx] = os.path.join(src_json_dir, file)

    with open(txt_path, "r") as f:
        lines = f.readlines()
    for line in lines:
        # Expected line format: "-e <sec_idx> -d <mfov tokens...>".
        line_list = line.strip().split(" ")
        if len(line_list) < 4 or line_list[0] != "-e" or not line_list[1].isdigit() or line_list[2] != "-d":
            print("Error line -> {}".format(line))
            continue

        sec_idx = int(line_list[1])
        # Expand "a-b" range tokens; bare numbers are taken as-is.
        mFov_idx_list = []
        for token in line_list[3:]:
            if "-" in token:
                tmp_idx_start = int(token.split("-")[0])
                tmp_idx_end = int(token.split("-")[1])
                mFov_idx_list.extend(range(tmp_idx_start, tmp_idx_end + 1))
            else:
                mFov_idx_list.append(int(token))

        if sec_idx not in json_dict:
            print("{} not in json_dict.keys()".format(sec_idx))
            continue

        if len(mFov_idx_list) == 0:  # possible when a range token is reversed (b < a)
            print("line {} is NULL.".format(line))
            continue

        with open(json_dict[sec_idx], 'rt') as in_f:
            tilespec = ujson.load(in_f)

        # Keep only the tiles whose mFov was requested.
        new_tilespec = [tile for tile in tilespec if tile["mfov"] in mFov_idx_list]

        dst_json = os.path.join(dst_json_dir, os.path.basename(json_dict[sec_idx]))
        with open(dst_json, 'wt') as f:
            json.dump(new_tilespec, f, sort_keys=True, indent=4)


def showmasks(mask, tmp_img, name, add=True):
    """Overlay a boolean mask on tmp_img (in green) and show it in window `name`.

    If add is True the mask is also pushed onto the undo queue via
    add_mask_to_queue().
    """
    # Convert the boolean mask to a uint8 image (True -> 255).
    mask_uint8 = mask.astype(np.uint8) * 255

    # Colour overlay of the same size as the image; masked pixels green.
    color_mask = np.zeros_like(tmp_img)
    color_mask[mask_uint8 == 255] = [0, 255, 0]

    # Blend the colour overlay onto the original image.
    overlayed_image = cv2.addWeighted(tmp_img, 1, color_mask, 0.2, 0)

    # Display the blended image in the existing window.
    cv2.imshow(name, overlayed_image)
    if add:
        add_mask_to_queue(mask)


def create_rect_mask(image, point1, point2):
    """Return a boolean mask of image's 2-D shape that is True inside the
    axis-aligned rectangle spanned by the (x, y) corners point1 and point2.

    The previous implementation only built the mask when the module globals
    LUPoint/RDPoint happened to be defined as tuples and silently returned
    None otherwise; that check was unrelated to the actual parameters, so it
    is dropped and the rectangle is always computed from point1/point2.
    """
    mask = np.full(image.shape[:2], False, dtype=bool)

    # Rectangle bounds: min/max of the two corner coordinates.
    x_min = min(point1[0], point2[0])
    x_max = max(point1[0], point2[0])
    y_min = min(point1[1], point2[1])
    y_max = max(point1[1], point2[1])

    # Mark the interior; numpy slicing clips overshooting bounds for us.
    mask[y_min:y_max, x_min:x_max] = True
    return mask


def undo_last_mask():
    """Drop the newest mask from the queue and return the one now on top.

    With a single stored mask that mask itself is returned (it is never
    popped); with an empty queue the result is None.
    """
    global mask_queue
    if not mask_queue:
        return None
    if len(mask_queue) == 1:
        return mask_queue[0]
    mask_queue.pop()
    return mask_queue[-1]


mask_queue = []  # 初始化掩码队列


def mask_clear():
    """Reset the current working mask and empty the undo queue."""
    global mask_queue, mask
    mask = None
    mask_queue = []


def add_mask_to_queue(mask):
    """Push a copy of mask onto the undo queue."""
    global mask_queue
    mask_queue.append(mask.copy())  # copy so later in-place edits don't rewrite history


def combine_masks(mask1, mask2):
    """Return the element-wise union of two equally-shaped boolean masks.

    Raises ValueError when the shapes differ.
    """
    if mask1.shape != mask2.shape:
        raise ValueError("Masks must be of the same size")
    return np.logical_or(mask1, mask2)


def subtract_masks(mask1, mask2):
    """Return mask1 with the True pixels of mask2 removed (mask1 AND NOT mask2).

    Raises:
        ValueError: if the two masks differ in shape.  The shapes are put in
            the exception message; previously they were only print()ed, which
            is easy to lose during an interactive GUI session.
    """
    if mask1.shape != mask2.shape:
        raise ValueError("Masks must be of the same size (mask1 {}, mask2 {})".format(
            mask1.shape, mask2.shape))

    # Logical NOT of mask2, then AND with mask1.
    return np.logical_and(mask1, np.logical_not(mask2))


mask = None


def on_mouse_draw_rect(event, x, y, flags, param):
    """Mouse callback for interactive SAM mask editing.

    Modes (shift state is toggled globally by on_b_press):
      * no shift : left click adds a positive SAM prompt point, right click a
        negative one, middle click resets points/mask and re-runs SAM.
      * shift    : left press+release adds a rectangle to the mask, right
        press+release subtracts one, middle click undoes the last mask.

    param: dict with "img_data" (display image), "predictor" (SAM predictor),
    "mask" (initial mask) and "sec_idx" (window title).  The evolving mask
    and the rectangle corners live in module globals.
    """
    global point1, point2
    global LUPoint, RDPoint
    global mask
    tmp_img = param["img_data"].copy()
    predictor = param["predictor"]
    if mask is None:
        mask = param["mask"]
    if event == cv2.EVENT_LBUTTONDOWN and not shift_pressed:  # left click: positive point
        valid_x = min(max(0, x), tmp_img.shape[1])  # clamping to shape[1] instead of shape[1]-1 barely matters here
        valid_y = min(max(0, y), tmp_img.shape[0])
        # Picking exactly 0 with the mouse is hard, so snap small values to 0.
        valid_x = 0 if valid_x <= 20 else valid_x
        valid_y = 0 if valid_y <= 20 else valid_y
        point1 = (valid_x, valid_y)

        cv2.circle(tmp_img, point1, 20, 0, 5)
        cv2.imshow(str(param["sec_idx"]), tmp_img)
        addPoint(point1, 1)
        mask = generate_mask_with_sam(predictor=predictor)
        showmasks(mask, tmp_img, str(param["sec_idx"]))
    elif event == cv2.EVENT_RBUTTONDOWN and not shift_pressed:  # right click: negative point
        valid_x = min(max(0, x), tmp_img.shape[1])  # clamping to shape[1] instead of shape[1]-1 barely matters here
        valid_y = min(max(0, y), tmp_img.shape[0])
        # Picking exactly 0 with the mouse is hard, so snap small values to 0.
        valid_x = 0 if valid_x <= 20 else valid_x
        valid_y = 0 if valid_y <= 20 else valid_y
        point1 = (valid_x, valid_y)

        cv2.circle(tmp_img, point1, 20, 0, 5)
        cv2.imshow(str(param["sec_idx"]), tmp_img)
        addPoint(point1, 0)
        mask = generate_mask_with_sam(predictor=predictor)
        showmasks(mask, tmp_img, str(param["sec_idx"]))
    elif event == cv2.EVENT_LBUTTONDOWN and shift_pressed:
        LUPoint = (x, y)
        # rectangle start: the matching release handler below ADDS the area to the mask
    elif event == cv2.EVENT_LBUTTONUP and shift_pressed:
        RDPoint = (x, y)
        mask1 = create_rect_mask(tmp_img, LUPoint, RDPoint)
        mask = combine_masks(mask1, mask)
        showmasks(mask, tmp_img, str(param["sec_idx"]))
    elif event == cv2.EVENT_RBUTTONDOWN and shift_pressed:
        # rectangle start: the matching release handler below SUBTRACTS the area
        LUPoint = (x, y)
    elif event == cv2.EVENT_RBUTTONUP and shift_pressed:
        RDPoint = (x, y)
        mask1 = create_rect_mask(tmp_img, LUPoint, RDPoint)
        mask = subtract_masks(mask, mask1)
        showmasks(mask, tmp_img, str(param["sec_idx"]))
    elif event == cv2.EVENT_MBUTTONDOWN and shift_pressed:
        mask = undo_last_mask()
        showmasks(mask, tmp_img, str(param["sec_idx"]), False)
    elif event == cv2.EVENT_MBUTTONDOWN:  # middle click: reset prompts and regenerate the mask
        mask_clear()
        recall()
        mask = generate_mask_with_sam(predictor=predictor)
        showmasks(mask, tmp_img, str(param["sec_idx"]))


# elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):  # 按住左键拖曳
#     valid_x = min(max(0, x), tmp_img.shape[1])
#     valid_y = min(max(0, y), tmp_img.shape[0])
#     cv2.rectangle(tmp_img, point1, (valid_x, valid_y), 0, 5)
#     cv2.imshow(str(param["sec_idx"]), tmp_img)
# elif event == cv2.EVENT_LBUTTONUP:  # 左键释放
#     valid_x = min(max(0, x), tmp_img.shape[1])
#     valid_y = min(max(0, y), tmp_img.shape[0])
#     point2 = (valid_x, valid_y)
#     cv2.rectangle(tmp_img, point1, point2, 0, 5)
#     cv2.imshow(str(param["sec_idx"]), tmp_img)
#
#     min_x, min_y = min(point1[0], point2[0]), min(point1[1], point2[1])
#     max_x, max_y = max(point1[0], point2[0]), max(point1[1], point2[1])
#     tmp_rect = [min_x, max_x, min_y, max_y]
#     print("on_mouse_draw_rect, rect: {}".format(tmp_rect))
import keyboard

shift_pressed = False  # rectangle-mode flag for on_mouse_draw_rect; toggled by on_b_press()


def on_b_press(key):
    """keyboard.on_press handler: each shift key-down TOGGLES rectangle mode.

    NOTE(review): this flips on every key-down event, so holding shift with
    key auto-repeat may toggle repeatedly -- confirm the intended usage is a
    single tap to switch modes.
    """
    global shift_pressed
    if key.name == 'shift':
        shift_pressed = not shift_pressed


def enhance_contrast(image):
    """Stretch contrast in place and return the same array.

    Pixels brighter than 200 are scaled by 1.1, all other pixels by 0.9;
    results are clipped to [0, 255].
    """
    # Both masks are taken from the original values.  This matches the
    # original two-statement form: brightened pixels end up >= 221, so they
    # can never fall back into the <= 200 group.
    bright = image > 200
    dark = ~bright
    image[bright] = np.clip(image[bright] * 1.1, 0, 255)
    image[dark] = np.clip(image[dark] * 0.9, 0, 255)
    return image


def interpolate_points_on_rectangle(rect, n):
    """Sample n points along each edge of a quadrilateral.

    rect holds the four corners p1..p4.  Each of the first three edges
    contributes n points excluding its end corner; the closing edge p4->p1
    includes its endpoint, so p1 appears again and the outline is closed.
    Returns the points stacked into a single (4n, 2) array.
    """
    p1, p2, p3, p4 = rect
    edges = [(p1, p2, False), (p2, p3, False), (p3, p4, False), (p4, p1, True)]
    segments = [np.linspace(a, b, n, endpoint=inc) for a, b, inc in edges]
    return np.vstack(segments)


def count_points_in_mask(points, mask):
    """Count how many (x, y) points land on a True pixel of the boolean mask.

    Coordinates are truncated to ints; points outside the mask bounds are
    simply ignored.
    """
    h, w = mask.shape[0], mask.shape[1]
    total = 0
    for px, py in ((int(p[0]), int(p[1])) for p in points):
        if 0 <= py < h and 0 <= px < w and mask[py, px]:
            total += 1
    return total

def checkPath(out_dir1):
    """Create the directory (including missing parents) if it does not exist."""
    target = Path(out_dir1)
    if not target.exists():
        target.mkdir(parents=True)

def save_mask(dir, base_name, img):
    """Save the current global SAM mask as <base_name>_mask.png inside dir.

    Returns the path of the written file.

    NOTE(review): despite the signature, the pixel data written comes from
    the module-level global `mask`, not from the `img` parameter -- the
    caller in draw_rect_interact() passes the rendered image here while the
    boolean mask lives in the global.  Kept as-is to preserve behaviour;
    confirm whether `img` was ever meant to be used.
    """
    # Mask file name: original name with a _mask suffix, always .png.
    mask_filename = os.path.splitext(base_name)[0] + '_mask.png'
    checkPath(dir)
    # Full output path for the mask image.
    mask_filepath = os.path.join(dir, mask_filename)

    # Convert the boolean mask to uint8: True -> 255, False -> 0.
    img = np.uint8(mask) * 255
    # Write the image.  (The unreachable `pass` after the return was removed.)
    cv2.imwrite(mask_filepath, img)

    return mask_filepath


def save_image_with_mask_edge(img, mask, save_img_dir, base_name):
    """Draw the mask's outline (in red) on img and save the result.

    The output file is <base_name stem>_with_mask_edge.png inside
    save_img_dir; the saved path is returned.
    """
    # Edge pixels of the mask, found via Canny on its 0/255 rendering.
    edges = cv2.Canny(np.uint8(mask) * 255, 100, 200)

    # Paint only those edge pixels red on a black canvas, then blend.
    edge_img = np.zeros_like(img)
    edge_img[edges != 0] = [0, 0, 255]  # red in BGR order
    combined_img = cv2.addWeighted(img, 1, edge_img, 1, 0)

    # Build the output path and write the image.
    save_filename = os.path.splitext(base_name)[0] + '_with_mask_edge.png'
    save_filepath = os.path.join(save_img_dir, save_filename)
    cv2.imwrite(save_filepath, combined_img)
    return save_filepath


def draw_rect_interact(stitch_json_dir, stitch_render_img_dir, save_json_dir, save_img_dir, save_mask_dir,
                       render_scale):
    """Interactively refine a SAM mask per section and keep the tiles it covers.

    For each rendered section png a window opens showing a SAM-generated
    mask; the user refines it with the mouse (see on_mouse_draw_rect; shift,
    toggled via the keyboard hook, switches to rectangle mode) and confirms
    with the SPACE key.  Tiles of the section's stitch json whose
    (rigid-transformed, render_scale-scaled) outline has more than 5 sampled
    boundary points inside the mask are written to save_json_dir; an overlay
    image is saved to save_img_dir and the mask to save_mask_dir.
    """
    keyboard.on_press(on_b_press)
    stitch_json_dir = file_utils.get_abs_dir(stitch_json_dir)
    stitch_render_img_dir = file_utils.get_abs_dir(stitch_render_img_dir)
    file_utils.create_dir(save_json_dir)
    file_utils.create_dir(save_img_dir)

    json_dict = {}
    # Collect all json files under stitch_json_dir, keyed by section index.
    for file in os.listdir(stitch_json_dir):
        if file.rsplit(".", 1)[1] == "json" and "S" in file and "M" in file:
            sec_idx = int(file.split("S")[1].split("M")[0])
            if sec_idx in json_dict.keys():
                raise Exception("Error! Repetitive sec_idx {}.".format(sec_idx))

            json_dict[sec_idx] = os.path.join(stitch_json_dir, file)

    # Collect all png files under stitch_render_img_dir, keyed by section index.
    img_dict = {}
    for file in os.listdir(stitch_render_img_dir):
        if file.rsplit(".", 1)[1] == "png" and "S" in file and "M" in file:
            sec_idx = int(file.split("S")[1].split("M")[0])
            if sec_idx in img_dict.keys():
                raise Exception("Error! Repetitive sec_idx {}.".format(sec_idx))

            img_dict[sec_idx] = os.path.join(stitch_render_img_dir, file)
    img_dict = dict(sorted(img_dict.items(), key=lambda x: x[0]))

    if len(json_dict) == 0 or len(img_dict) == 0:
        print("{} or {} is Null.".format(stitch_json_dir, stitch_render_img_dir))

    infos = []  # per-image rect corner info (LU/RD only), kept so it could be dumped to a txt for traceability
    for sec_idx, img_path in img_dict.items():
        if sec_idx not in json_dict.keys():
            print("{} not in json_dict.keys().".format(sec_idx))
            continue
        predictor_pool = PredictorPool()
        img = cv2.imread(img_path, 0)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # img = enhance_contrast(img)
        predictor = predictor_pool.get_predictor(img)
        cv2.namedWindow(str(sec_idx), cv2.WINDOW_KEEPRATIO)
        mdict = {}
        mdict["sec_idx"] = sec_idx
        mdict["img_data"] = img
        mdict["predictor"] = predictor
        # cv2.imshow(str(sec_idx), img)
        mask = generate_mask_with_sam(predictor=predictor)
        showmasks(mask, img, str(sec_idx))
        mdict["mask"] = mask
        cv2.setMouseCallback(str(sec_idx), on_mouse_draw_rect, mdict)

        # Block until SPACE (key code 32) confirms the mask.
        while True:
            key = cv2.waitKey(0)
            if key == 32:
                break

        cv2.destroyAllWindows()
        # min_x, min_y = min(point1[0], point2[0]), min(point1[1], point2[1])
        # max_x, max_y = max(point1[0], point2[0]), max(point1[1], point2[1])
        # draw_rect = [min_x, max_x, min_y, max_y]  # same layout as the json bbox: [min_x, max_x, min_y, max_y]
        # draw_rect_pts = [[min_x, min_y], [max_x, min_y], [max_x, max_y], [min_x, max_y]]
        # print("sec_idx: {}, rect: {}".format(sec_idx, draw_rect))

        # Load the section's tilespec.
        with open(json_dict[sec_idx], "r") as f:
            tilespec = json.load(f)

        # # Ideally the tile shape would be read from the first image, but the
        # # data lives on a server and cannot be read locally, so it is fixed:
        # img_path = tilespec[0]["mipmapLevels"]["0"]["imageUrl"].replace("file://", "")
        # height, width = cv2.imread(img_path, 0).shape
        height, width = 3500, 4004
        corners = np.array([[0, 0], [width, 0], [width, height], [0, height]])

        # Keep every tile that intersects (or lies inside) the mask.
        # NOTE(review): `mask` here is the function-local initial SAM mask;
        # the mouse callback updates the module-GLOBAL `mask`, so interactive
        # edits may not be reflected in this filter -- confirm.
        keep_tiles = []
        for tile in tilespec:
            # The tile may be rotated, so intersection is tested on its real
            # transformed outline rather than on its axis-aligned bbox.  The
            # stitch transforms are RigidModel2D, hence the rotation +
            # translation applied below.
            for t in tile["transforms"]:
                rigid_model_data = t["dataString"]
                angle, delta1, delta2 = float(rigid_model_data.split(" ")[0]), float(
                    rigid_model_data.split(" ")[1]), float(rigid_model_data.split(" ")[2])
                cos_val = np.cos(angle)
                sin_val = np.sin(angle)
                tmp_corners = np.dot([[cos_val, -sin_val], [sin_val, cos_val]], corners.T).T + np.asarray(
                    [delta1, delta2]).reshape((1, 2))
                tmp_corners *= render_scale  # corner positions in the rendered (scaled) image
                interpolated_points = interpolate_points_on_rectangle(tmp_corners, 10)  # 10 evenly spaced points per edge
                count = count_points_in_mask(interpolated_points, mask)
                if (count > 5):  # more than five boundary points inside the mask counts as intersecting
                    keep_tiles.append(tile)
                # draw_p = Polygon(draw_rect_pts)
                # tile_p = Polygon(tmp_corners)
                # if draw_p.intersects(tile_p):  # touching in a single point already counts
                #     keep_tiles.append(tile)

        # Write the filtered tilespec.
        dst_json = os.path.join(save_json_dir, os.path.basename(json_dict[sec_idx]))
        with open(dst_json, 'wt') as f:
            json.dump(keep_tiles, f, sort_keys=True, indent=4)
        save_image_with_mask_edge(img, mask, save_img_dir, os.path.basename(img_path))
        save_mask(save_mask_dir, os.path.basename(img_path), img)
        mask_clear()
        # Save the image with the drawn rectangle and dump the corner info:
        # cv2.rectangle(img, (min_x, min_y), (max_x, max_y), 0, 5)
        # cv2.imwrite(os.path.join(save_img_dir, os.path.basename(img_path)), img)
    keyboard.unhook(on_b_press)
    # info_str = "{0} ({1}, {2}) ({3}, {2}) ({3}, {4}) ({1}, {4})".format(os.path.basename(img_path), min_x, min_y,
    #                                                                     max_x, max_y)
    # infos.append(info_str)

    # # Dump the picked rectangle corners of all images:
    # with open(os.path.join(save_img_dir, "draw_pts.txt"), "w") as f:
    #     for info in infos:
    #         f.writelines("{}\n".format(info))


draw_poly_pts = []


def on_mouse_draw_polygon(event, x, y, flags, param):
    """Mouse callback for picking a polygon.

    Left click adds a vertex, right click finishes the polygon (needs more
    than 2 points), middle click removes the most recently added vertex.
    Vertices accumulate in the module-level draw_poly_pts list; param carries
    "img_data" (image to draw on) and "sec_idx" (window title).
    """
    global draw_poly_pts
    tmp_img = param["img_data"].copy()
    if event == cv2.EVENT_LBUTTONDOWN:  # left click: add a polygon vertex
        valid_x = min(max(0, x), tmp_img.shape[1])  # clamping to shape[1] instead of shape[1]-1 barely matters here
        valid_y = min(max(0, y), tmp_img.shape[0])
        # Picking exactly 0 with the mouse is hard, so snap small values to 0.
        valid_x = 0 if valid_x <= 20 else valid_x
        valid_y = 0 if valid_y <= 20 else valid_y
        pt = (valid_x, valid_y)
        draw_poly_pts.append(pt)
        # print("L: ", draw_poly_pts)
        if len(draw_poly_pts) == 1:
            cv2.circle(tmp_img, draw_poly_pts[0], 20, 0, 5)
        else:
            for i in range(len(draw_poly_pts) - 1):
                cv2.circle(tmp_img, draw_poly_pts[i], 20, 0, 5)
                cv2.line(tmp_img, draw_poly_pts[i], draw_poly_pts[i + 1], 0, 5)
        cv2.imshow(str(param["sec_idx"]), tmp_img)
    elif event == cv2.EVENT_RBUTTONDOWN:  # right click: finish picking
        # print("R: ", draw_poly_pts)
        if len(draw_poly_pts) <= 2:
            print("Error! You cannot choose points <= 2.")
            if len(draw_poly_pts) == 1:
                cv2.circle(tmp_img, draw_poly_pts[0], 20, 0, 5)
            elif len(draw_poly_pts) == 2:
                for i in range(len(draw_poly_pts) - 1):
                    cv2.circle(tmp_img, draw_poly_pts[i], 20, 0, 5)
                    cv2.line(tmp_img, draw_poly_pts[i], draw_poly_pts[i + 1], 0, 5)
        else:
            for i in range(len(draw_poly_pts) - 1):
                cv2.circle(tmp_img, draw_poly_pts[i], 20, 0, 5)
                cv2.line(tmp_img, draw_poly_pts[i], draw_poly_pts[i + 1], 0, 5)
            # Close the outline by joining the last vertex back to the first.
            cv2.line(tmp_img, draw_poly_pts[0], draw_poly_pts[-1], 0, 5)
        cv2.imshow(str(param["sec_idx"]), tmp_img)
    elif event == cv2.EVENT_MBUTTONDOWN:  # middle click: drop the newest vertex
        # NOTE(review): the original comment said "cancel all points", but the
        # code removes only the last one -- confirm which is intended.
        if len(draw_poly_pts) > 1:
            del draw_poly_pts[-1]
            if len(draw_poly_pts) == 1:
                cv2.circle(tmp_img, draw_poly_pts[0], 20, 0, 5)
            else:
                for i in range(len(draw_poly_pts) - 1):
                    cv2.circle(tmp_img, draw_poly_pts[i], 20, 0, 5)
                    cv2.line(tmp_img, draw_poly_pts[i], draw_poly_pts[i + 1], 0, 5)
        else:
            draw_poly_pts = []
        # print("M: ", draw_poly_pts)
        cv2.imshow(str(param["sec_idx"]), tmp_img)


def draw_polygon_interact(stitch_json_dir, stitch_render_img_dir, save_json_dir, save_img_dir, render_scale):
    """Interactively pick a polygon per section and keep the intersecting tiles.

    For each rendered section png a window opens; the user picks polygon
    vertices with the mouse (see on_mouse_draw_polygon) and closes the window
    with any key.  Tiles of the section's stitch json whose
    (rigid-transformed, render_scale-scaled) outline intersects the polygon
    are written to save_json_dir; the image with the polygon drawn on it goes
    to save_img_dir.  With fewer than 3 picked points all tiles are kept.
    """
    stitch_json_dir = file_utils.get_abs_dir(stitch_json_dir)
    stitch_render_img_dir = file_utils.get_abs_dir(stitch_render_img_dir)
    file_utils.create_dir(save_json_dir)
    file_utils.create_dir(save_img_dir)

    json_dict = {}
    # Collect all json files under stitch_json_dir, keyed by section index.
    for file in os.listdir(stitch_json_dir):
        if file.rsplit(".", 1)[1] == "json" and "S" in file and "M" in file:
            sec_idx = int(file.split("S")[1].split("M")[0])
            if sec_idx in json_dict.keys():
                raise Exception("Error! Repetitive sec_idx {}.".format(sec_idx))

            json_dict[sec_idx] = os.path.join(stitch_json_dir, file)

    # Collect all png files under stitch_render_img_dir, keyed by section index.
    img_dict = {}
    for file in os.listdir(stitch_render_img_dir):
        if file.rsplit(".", 1)[1] == "png" and "S" in file and "M" in file:
            sec_idx = int(file.split("S")[1].split("M")[0])
            if sec_idx in img_dict.keys():
                raise Exception("Error! Repetitive sec_idx {}.".format(sec_idx))

            img_dict[sec_idx] = os.path.join(stitch_render_img_dir, file)
    img_dict = dict(sorted(img_dict.items(), key=lambda x: x[0]))

    if len(json_dict) == 0 or len(img_dict) == 0:
        print("{} or {} is Null.".format(stitch_json_dir, stitch_render_img_dir))

    infos = []  # polygon vertices per image, recorded for traceability (txt dump below is commented out)
    for sec_idx, img_path in img_dict.items():
        if sec_idx not in json_dict.keys():
            print("{} not in json_dict.keys().".format(sec_idx))
            continue

        img = cv2.imread(img_path, 0)
        cv2.namedWindow(str(sec_idx), cv2.WINDOW_KEEPRATIO)
        mdict = {}
        mdict["sec_idx"] = sec_idx
        mdict["img_data"] = img
        global draw_poly_pts
        # print("Pre: ", draw_poly_pts)
        cv2.setMouseCallback(str(sec_idx), on_mouse_draw_polygon, mdict)
        cv2.imshow(str(sec_idx), img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        # Load the section's tilespec.
        with open(json_dict[sec_idx], "r") as f:
            tilespec = json.load(f)

        # # Ideally the tile shape would be read from the first image, but the
        # # data lives on a server and cannot be read locally, so it is fixed:
        # img_path = tilespec[0]["mipmapLevels"]["0"]["imageUrl"].replace("file://", "")
        # height, width = cv2.imread(img_path, 0).shape
        height, width = 3500, 4004
        corners = np.array([[0, 0], [width, 0], [width, height], [0, height]])

        # Keep every tile that intersects (or lies inside) the drawn polygon.
        keep_tiles = []
        for tile in tilespec:
            # The tile may be rotated, so intersection is tested on its real
            # transformed outline rather than on its axis-aligned bbox.  The
            # stitch transforms are RigidModel2D, hence the rotation +
            # translation applied below.
            for t in tile["transforms"]:
                rigid_model_data = t["dataString"]
                angle, delta1, delta2 = float(rigid_model_data.split(" ")[0]), float(
                    rigid_model_data.split(" ")[1]), float(rigid_model_data.split(" ")[2])
                cos_val = np.cos(angle)
                sin_val = np.sin(angle)
                tmp_corners = np.dot([[cos_val, -sin_val], [sin_val, cos_val]], corners.T).T + np.asarray(
                    [delta1, delta2]).reshape((1, 2))
                tmp_corners *= render_scale  # corner positions in the rendered (scaled) image

                if len(draw_poly_pts) < 3:  # fewer than 3 points means "no polygon was drawn": keep every tile
                    keep_tiles.append(tile)
                else:
                    draw_p = Polygon(draw_poly_pts)
                    tile_p = Polygon(tmp_corners)
                    if draw_p.intersects(tile_p):  # touching in a single point already counts
                        keep_tiles.append(tile)

        # Write the filtered tilespec.
        dst_json = os.path.join(save_json_dir, os.path.basename(json_dict[sec_idx]))
        with open(dst_json, 'wt') as f:
            json.dump(keep_tiles, f, sort_keys=True, indent=4)

        # Save the image with the polygon drawn on it; the vertex info is
        # appended to `infos`.
        # print("Post: ", draw_poly_pts)
        draw_poly_pts = np.array(draw_poly_pts)
        if len(draw_poly_pts) >= 3:
            cv2.polylines(img, [draw_poly_pts], True, 0, 5)
        cv2.imwrite(os.path.join(save_img_dir, os.path.basename(img_path)), img)

        info_str = os.path.basename(img_path)
        if len(draw_poly_pts) >= 3:
            for i in range(len(draw_poly_pts)):
                info_str += " ({}, {})".format(draw_poly_pts[i][0], draw_poly_pts[i][1])
        infos.append(info_str)

        draw_poly_pts = []

    # # Dump the picked polygon vertices of all images:
    # with open(os.path.join(save_img_dir, "draw_pts.txt"), "w") as f:
    #     for info in infos:
    #         f.writelines("{}\n".format(info))


def get_tile_from_point(pt, json_path, scale=0.5):
    """Print which tile(s) of a tilespec json contain the point pt.

    Handy when neuroglancer shows an artefact at (x, y) and the raw tile
    images are needed.  Acquisition is at 4nm while rendering is usually 8nm,
    so the json bboxes are scaled by `scale` (default 0.5) before the
    containment test; adjust it if the render used a different scale.
    """
    json_path = file_utils.get_abs_file_path(json_path)
    with open(json_path, "r") as f:
        tilespec = json.load(f)

    tile_infos = []
    for tile in tilespec:
        # bbox layout is [min_x, max_x, min_y, max_y].
        min_x, max_x, min_y, max_y = np.array(tile["bbox"]) * scale
        if min_x <= pt[0] <= max_x and min_y <= pt[1] <= max_y:  # point inside bbox
            img_url = tile["mipmapLevels"]["0"]["imageUrl"].replace("file://", "")
            tile_infos.append("Sec {} mFov {} tile {}, img_path: {}".format(
                tile["layer"], tile["mfov"], tile["tile_index"], img_url))

    if not tile_infos:
        print("Not found tile info according to point {}.".format(pt))
    else:
        print("Point {} may be in:".format(pt))
        for tile_info in tile_infos:
            print(tile_info)


def main():
    """Command-line entry point.

    Parses the common in/out/process-count arguments and runs the currently
    selected helper. The commented-out calls below are kept deliberately as a
    catalogue of example invocations for the other tools in this module;
    uncomment the one needed and adjust paths.
    """
    parser = argparse.ArgumentParser(description="This is a auxiliary tool.")
    parser.add_argument("-i", "--in_dir", type=str, default="/media/hqjin/Elements/em_data", help="source dir")
    parser.add_argument("-o", "--out_dir", type=str, default="/media/hqjin/Elements/em_data", help="output dir")
    parser.add_argument("-p", "--process_num", type=int, default=16, help="the number of processes to use(default: 16)")
    args = parser.parse_args()

    # invert_imgs("/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/001_S1R1/000001", 
    #             "/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/stitch/tmp/001_S1R1/000001")

    # get_tile_layout_by_json("/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/stitch/json/ECS_test9_cropped_010_S001R1-1.json", 
    #                 "/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/stitch/tmp/tile_layout.png")

    # get_mFov_layout_by_json("/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/stitch/json/ECS_test9_cropped_010_S001R1-1.json", 
    #                 "/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/stitch/tmp/mFov_layout.png")

    # get_mFov_layout_by_txt("/home/hqjin/tmp/full_image_coordinates.txt", "/home/hqjin/tmp/full_image_coordinates.png")

    # ret = check_rect_in_img_max_contour("/home/hqjin/tmp/imgs/002/S2M1_tr1-tc1.png", [3100, 2000, 10800, 10800])
    # print("ret: ", ret)

    # loop_check_rect_in_img_max_contour("/home/hqjin/tmp/imgs", 10800, 10800)

    # crop_img_by_rect([3100, 2000, 10800, 10800], "/home/hqjin/tmp/imgs/002/S2M8_tr1-tc1.png", "/home/hqjin/tmp/imgs/002/S2M8_tr1-tc1_tmp.png")

    # loop_crop_img_by_rect(10800, 10800, "/home/hqjin/tmp/imgs/", "/home/hqjin/tmp/crop_imgs", dst_img_name_prefix="sample1_")

    # convert_tif("/home/hqjin/tmp/Image.tif", "/home/hqjin/tmp/Image2.png")

    # loop_convert_tif("/media/hqjin/Elements/OEunion_data/om_data_wafer12_1", "/media/hqjin/Elements/OEunion_data/om_data_wafer12_2")

    # draw_img_center("/home/hqjin/tmp/imgs/002/S2M1_tr1-tc1.png", "/home/hqjin/tmp/imgs/002/S2M1_tr1-tc1_tmp.png")

    # crop_optics_img_sec_to_mFovs("/media/hqjin/Elements/OEunion_data/om_data/Image-1-(2).png", 
    # "/home/hqjin/tmp/full_image_coordinates.txt", "/media/hqjin/Elements/OEunion_data/om_data_crop", (640, 1246))

    # manual_check_electronic_optical_imgs("/media/hqjin/Elements/OEunion_data/em_data/crop_imgs", "/media/hqjin/Elements/OEunion_data/om_data_crop")

    # all_img_names, row_col_dict = get_all_block_img_name_row_col_range("/media/hqjin/Elements/OEunion_data/em_data/sample1/out/stitch/imgs/009")
    # check_block_imgs_integral(all_img_names, row_col_dict)
    # merge_block_imgs(all_img_names, row_col_dict, 0.05, "/media/hqjin/Elements/OEunion_data/em_data/sample1/out/stitch/thumbnail_imgs/S9M1-32.png")

    # loop_merge_block_imgs("/media/hqjin/Elements/OEunion_data/em_data/sample1/out/stitch/img_8nm", "/media/hqjin/Elements/OEunion_data/em_data/sample1/out/stitch/merge_imgs_32nm", scale=0.05, process_num=args.process_num)

    # crop_optics_img_sec("/media/hqjin/Elements/OEunion_data/em_data/sample1/out/stitch/thumbnail_imgs_draw/S2M1-33.png", 
    # "/media/hqjin/Elements/OEunion_data/om_data/Image-1-002.png", 8, 0.05, 345, (1618, 1764), "/media/hqjin/Elements/OEunion_data/om_data_sec_crop")

    # crop_optics_img_sec2("/media/hqjin/Elements/OEunion_data/em_data/sample1/out/align/merge_img_128nm_draw/S2M1-33.png", 
    # "/media/hqjin/Elements/OEunion_data/om_data_align/Image-1-002.png", 8, 0.0625, 345, (2876, 2470), "/media/hqjin/Elements/OEunion_data/om_data_align_sec_crop")
    # loop_crop_optics_img_sec2("/media/hqjin/Elements/OEunion_data/em_data/sample1_1um/out/align/img_128nm", "/media/hqjin/Elements/OEunion_data/om_data4_align", 8, 0.0625, 345, (2944, 2538), "/media/hqjin/Elements/OEunion_data/om_data4_align_sec_crop")

    # convert_src_section_imgs("/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/001_S1R1", "/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18-2/001_S1R1")

    # change_thumbbail_img_name("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer5/数据质量/overview_imgs", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer5/数据质量/overview_imgs_change_name")

    # Clockwise rotation in radians: 15°(0.26179938), 30°(0.52359877), 45°(0.78539815), 60°(1.04719753), 75°(1.30899692), 90°(1.5707963),
    # 105°(1.83259568), 120°(2.09439507), 135°(2.35619445), 150°(2.61799383), 165°(2.87979322), 180°(3.1415926)
    # change_json2("/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/stitch/json/S2M1-2.json", "/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/stitch/json2/S2M1-2.json", 1.04719753)

    # gen_section_thumbnail("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer4/008_S8R1", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer4/out/thumb", scale=0.25)
    # loop_gen_section_thumbnail("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer4", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer4/out/thumb", scale=0.25)

    # manual_rotate_imgs("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer24/数据质量/overview_imgs_change_name2", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer24/数据质量/overview_imgs_change_name3")

    # print_change_json_info("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer24/数据质量/overview_imgs_change_name3/angle.xlsx", \
    #     "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer24/out/stitch/json_src", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer24/out/stitch/json_src2")

    # copy_from_other_section("/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/align/img_8nm/00002", "/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/align/img_8nm/00003")

    # change_stitch_json_sec_idx("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer26/out/stitch/json_src/S1M1-163.json", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer26/out/stitch/json_src2", 1108)
    # loop_change_stitch_json_sec_idx("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer26/out/stitch/json_src", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer26/out/stitch/json_src2", 1108)

    # choose_mFov_from_json("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer26/out/stitch/json_src", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer26/out/stitch/json_src2", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer26/choose_mFov.txt")

    # draw_rect_interact("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer28/out/stitch/json_src2", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer28/out/stitch/img_256nm", 
    #                    "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer28/out/stitch/json_choose_tiles", "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer28/out/stitch/img_256nm_draw", render_scale=0.015625)

    # FIX: these Windows paths were plain string literals containing backslash
    # escape sequences ("\w", "\s", "\j", "\i", "\c", "\m"); they only worked by
    # accident (unrecognized escapes pass through) and emit SyntaxWarning on
    # modern Python. Raw strings make the paths literal and future-proof.
    draw_rect_interact(r"E:\wafer29\stitch\json_src2", r"E:\wafer29\stitch\img_2048nm",
                       r"E:\wafer29\stitch\json_choose_tiles2", r"E:\wafer29\stitch\img_256nm_draw2",
                       r"E:\wafer29\stitch\img_mask",
                       render_scale=0.015625 / 8)

    # get_tile_from_point((3474, 3329), "/media/hqjin/Elements/em_data/Experiment_20220508_18-33-18/out/stitch/json/S1M1-1.json", scale=0.5)

# Script entry point: run the currently selected auxiliary tool.
if __name__ == "__main__":
    main()
