import os
import sys
sys.path.append(".")
import cv2
import argparse
import time
import numpy as np
from multiprocessing import Pool
import re
import inspect  # 用于获取函数信息
from concurrent.futures import ThreadPoolExecutor
import constant as const
from tools import file_utils


def downsample_img(src_img_path, dst_img_path, scale=0.25):
    """Downsample one image (or a list of images) by `scale` and save the result(s).

    Args:
        src_img_path: a single source image path (str) or a list of source paths.
        dst_img_path: the matching destination path (str) or list of paths.
        scale: resize factor applied to both axes (default 0.25).
    """
    def _downsample_one(src_path, dst_path, s):
        # Read as grayscale, resize by the scale factor, write the result.
        img = cv2.imread(src_path, 0)
        img = cv2.resize(img, (0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
        cv2.imwrite(dst_path, img)

    if isinstance(src_img_path, str) and isinstance(dst_img_path, str):  # single image path
        file_utils.create_save_dir_from_file(dst_img_path)
        _downsample_one(src_img_path, dst_img_path, scale)
    elif isinstance(src_img_path, list) and isinstance(dst_img_path, list):  # list of image paths
        if not src_img_path:  # guard: dst_img_path[0] below would raise IndexError
            return
        # All destinations are assumed to share one directory, so create it once.
        file_utils.create_save_dir_from_file(dst_img_path[0])
        # Threads are slightly faster here than a plain sequential loop.
        with ThreadPoolExecutor(max_workers=5) as executor:
            # list() drains the lazy map iterator so that exceptions raised in
            # worker threads propagate instead of being silently dropped.
            list(executor.map(_downsample_one, src_img_path, dst_img_path, [scale] * len(src_img_path)))


def batch_downsample(src_dir, dst_dir, scale=1.0, process_num=1):
    """Downsample every image in src_dir and save the results into dst_dir.

    The images are split into contiguous batches, each handled by one worker
    process to save time. If process_num is negative it is replaced by the
    machine's CPU count.
    """
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    # os.path.splitext tolerates names without a dot (rsplit(".")[1] would raise IndexError).
    img_paths = [f for f in os.listdir(src_dir) if os.path.splitext(f)[1][1:].lower() in const.IMG_TYPE_LIST]
    process_num = os.cpu_count() if process_num < 0 else process_num
    print("process_num: ", process_num)
    # Number of images handed to each worker; at least 1 so the slicing below is valid.
    batch_num = max(int(np.ceil(len(img_paths) / process_num)), 1)

    src_img_paths = [os.path.join(src_dir, f) for f in img_paths]
    dst_img_paths = [os.path.join(dst_dir, f) for f in img_paths]
    src_group = [src_img_paths[i: i + batch_num] for i in range(0, len(src_img_paths), batch_num)]
    dst_group = [dst_img_paths[i: i + batch_num] for i in range(0, len(dst_img_paths), batch_num)]

    pool = Pool(processes=process_num)
    res_l = [pool.apply_async(downsample_img, (src_group[i], dst_group[i], scale)) for i in range(len(src_group))]

    pool.close()
    pool.join()

    for res in res_l:
        res.get()  # re-raises any exception that occurred in a worker process
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def merge_tiles(src_dir, dst_img_path):
    """Stitch every tile in src_dir into one large image saved at dst_img_path.

    Tile positions are recovered from the 'tr<row>-tc<col>' pattern in each
    file name; all tiles must share one shape and form a complete grid.
    """
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    src_dir = file_utils.get_abs_dir(src_dir)
    file_utils.create_save_dir_from_file(dst_img_path)  # ensure the output dir exists (return value not needed)

    # os.path.splitext tolerates names without a dot (rsplit(".")[1] would raise IndexError).
    img_paths = [os.path.join(src_dir, f) for f in os.listdir(src_dir) if os.path.splitext(f)[1][1:].lower() in const.IMG_TYPE_LIST]

    # Extract row/col info from the file names with a regex.
    # pattern = re.compile(r'S\d+M\d+-\d+_tr(\d+)-tc(\d+)\.png')
    pattern = re.compile(r'tr(\d+)-tc(\d+)')
    row_col_dict = file_utils.get_row_col_info(img_paths, pattern)
    min_row, max_row, min_col, max_col = row_col_dict["min_row"], row_col_dict["max_row"], row_col_dict["min_col"], row_col_dict["max_col"]
    assert (max_row - min_row + 1) * (max_col - min_col + 1) == len(img_paths)  # the folder must hold a complete grid

    # All tiles are assumed to share one shape (verified below); take it from the first tile.
    img_shape = cv2.imread(img_paths[0], 0).shape
    merge_img = np.zeros(((max_row - min_row + 1) * img_shape[0], (max_col - min_col + 1) * img_shape[1]), np.uint8)

    for img_path in img_paths:
        img = cv2.imread(img_path, 0)
        assert img.shape == img_shape
        row_col = pattern.findall(img_path)[0]
        # Top-left corner of this tile inside the merged canvas.
        x0, y0 = (int(row_col[1]) - min_col) * img_shape[1], (int(row_col[0]) - min_row) * img_shape[0]
        merge_img[y0:y0 + img_shape[0], x0:x0 + img_shape[1]] = img
    cv2.imwrite(dst_img_path, merge_img)
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def combine_4_tiles_to_larger_tile_with_overlap(src_dir, dst_dir):
    """Merge every 2x2 group of 256nm tiles into one larger tile: T(i,j),
    T(i,j+1), T(i+1,j) and T(i+1,j+1) become the new T(i,j). Because the 2x2
    window slides by a single tile, neighbouring output tiles overlap.
    """
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    # os.path.splitext tolerates names without a dot (rsplit(".")[1] would raise IndexError).
    img_paths = [os.path.join(src_dir, f) for f in os.listdir(src_dir) if os.path.splitext(f)[1][1:].lower() in const.IMG_TYPE_LIST]
    # Extract row/col info from the file names with a regex.
    pattern = re.compile(r'tr(\d+)-tc(\d+)')
    row_col_dict = file_utils.get_row_col_info(img_paths, pattern)
    min_row, max_row, min_col, max_col = row_col_dict["min_row"], row_col_dict["max_row"], row_col_dict["min_col"], row_col_dict["max_col"]
    assert (max_row - min_row + 1) * (max_col - min_col + 1) == len(img_paths)  # the folder must hold a complete grid

    # All tiles are assumed to share one shape (re-checked per group below, to save time).
    img_name_prefix = os.path.basename(img_paths[0]).split("_tr")[0]
    img_type = os.path.basename(img_paths[0]).rsplit(".", 1)[1]

    # The last row/col cannot anchor a full 2x2 group, hence the exclusive upper bounds.
    for row in range(min_row, max_row):
        for col in range(min_col, max_col):
            img1 = cv2.imread(os.path.join(src_dir, "{}_tr{}-tc{}.{}".format(img_name_prefix, row, col, img_type)), 0)          # top-left
            img2 = cv2.imread(os.path.join(src_dir, "{}_tr{}-tc{}.{}".format(img_name_prefix, row, col + 1, img_type)), 0)      # top-right
            img3 = cv2.imread(os.path.join(src_dir, "{}_tr{}-tc{}.{}".format(img_name_prefix, row + 1, col, img_type)), 0)      # bottom-left
            img4 = cv2.imread(os.path.join(src_dir, "{}_tr{}-tc{}.{}".format(img_name_prefix, row + 1, col + 1, img_type)), 0)  # bottom-right
            assert img1.shape == img2.shape == img3.shape == img4.shape

            # Paste the four tiles into their quadrants of the doubled canvas.
            dst_img = np.zeros((img1.shape[0] * 2, img1.shape[1] * 2), np.uint8)
            dst_img[:img1.shape[0], :img1.shape[1]] = img1
            dst_img[:img1.shape[0], img1.shape[1]:] = img2
            dst_img[img1.shape[0]:, :img1.shape[1]] = img3
            dst_img[img1.shape[0]:, img1.shape[1]:] = img4
            dst_img_path = os.path.join(dst_dir, "{}_tr{}-tc{}.{}".format(img_name_prefix, row, col, img_type))
            cv2.imwrite(dst_img_path, dst_img)
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def combine_tiles_to_larger_tiles_with_overlap(src_dir, dst_dir, img_shape, start_row, start_col, subgrid_row_num, subgrid_col_num, row_overlap, col_overlap, img_name_prefix):
    """Per-process worker for batch_combine_tiles_to_larger_tiles_with_overlap:
    stitch the subgrid starting at (start_row, start_col) into one large tile."""
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    tile_h, tile_w = img_shape[0], img_shape[1]
    stitched = np.zeros((subgrid_row_num * tile_h, subgrid_col_num * tile_w), np.uint8)
    for r in range(subgrid_row_num):
        for c in range(subgrid_col_num):
            tile = cv2.imread(os.path.join(src_dir, f"{img_name_prefix}_tr{start_row + r}-tc{start_col + c}.png"), 0)
            assert tile.shape == img_shape
            stitched[r * tile_h:(r + 1) * tile_h, c * tile_w:(c + 1) * tile_w] = tile

    # Name the output with the subgrid's own row/col index in the coarser grid.
    out_row = int(np.ceil((start_row - 1) / (subgrid_row_num - row_overlap))) + 1
    out_col = int(np.ceil((start_col - 1) / (subgrid_col_num - col_overlap))) + 1
    cv2.imwrite(os.path.join(dst_dir, "{}_tr{}-tc{}.png".format(img_name_prefix, out_row, out_col)), stitched)


def batch_combine_tiles_to_larger_tiles_with_overlap(src_dir, dst_dir, process_num=1):
    """Combine the 32nm tiles under src_dir into larger, mutually overlapping
    tiles saved in dst_dir, using a pool of worker processes.
    Note: must stay in sync with batch_combine_flows_to_larger_flows in flow.py."""
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    img_paths = [os.path.join(src_dir, f) for f in os.listdir(src_dir) if f.rsplit(".", 1)[1].lower() in const.IMG_TYPE_LIST]
    img_name_prefix = os.path.basename(img_paths[0]).split("_tr")[0]
    # Extract row/col info from the file names with a regex.
    pattern = re.compile(r'tr(\d+)-tc(\d+)')
    info = file_utils.get_row_col_info(img_paths, pattern)
    min_row, max_row = info["min_row"], info["max_row"]
    min_col, max_col = info["min_col"], info["max_col"]
    assert (max_row - min_row + 1) * (max_col - min_col + 1) == len(img_paths)  # the folder must hold a complete grid

    # A SUBGRID_ROW_NUM x SUBGRID_COL_NUM block of small tiles becomes one large
    # tile; neighbouring large tiles overlap by ROW_OVERLAP/COL_OVERLAP small tiles.
    subgrid_row_num, subgrid_col_num = const.SUBGRID_ROW_NUM, const.SUBGRID_COL_NUM
    row_overlap, col_overlap = const.ROW_OVERLAP, const.COL_OVERLAP

    img_shape = cv2.imread(img_paths[0], 0).shape  # all tiles assumed to share this shape (the worker re-checks)

    # Collect the top-left (row, col) of every subgrid. Clamping at the right and
    # bottom borders can repeat an id, so a set naturally deduplicates them.
    subgrid_ids = set()
    for base_row in range(min_row, max_row + 1, subgrid_row_num - row_overlap):
        top = base_row if base_row + subgrid_row_num - 1 <= max_row else max_row - subgrid_row_num + 1
        for base_col in range(min_col, max_col + 1, subgrid_col_num - col_overlap):
            left = base_col if base_col + subgrid_col_num - 1 <= max_col else max_col - subgrid_col_num + 1
            subgrid_ids.add((top, left))

    process_num = os.cpu_count() if process_num < 0 else process_num
    process_num = min(process_num, len(subgrid_ids))  # never start more workers than jobs
    print("process_num: ", process_num)

    pool = Pool(processes=process_num)
    pending = [pool.apply_async(combine_tiles_to_larger_tiles_with_overlap,
                                (src_dir, dst_dir, img_shape, top, left,
                                 subgrid_row_num, subgrid_col_num, row_overlap, col_overlap, img_name_prefix))
               for top, left in subgrid_ids]

    pool.close()
    pool.join()

    for job in pending:
        job.get()  # re-raises any worker exception
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def get_small_tiles_from_combine_tiles(refine_img_dir, dst_img_dir, src_min_row, src_max_row, src_min_col, src_max_col,
                                       tile_size, row_overlap=const.ROW_OVERLAP, col_overlap=const.COL_OVERLAP):
    """Split the overlapping 32nm large tiles back into the original small tiles,
    i.e. the inverse of batch_combine_tiles_to_larger_tiles_with_overlap.

    Args:
        refine_img_dir: directory holding the overlapping 32nm large tiles.
        dst_img_dir: directory in which to save the small, non-overlapping tiles.
        src_min_row: smallest row index of the original non-overlapping 32nm tiles.
        src_max_row: largest row index of the original non-overlapping 32nm tiles.
        src_min_col: smallest column index of the original non-overlapping 32nm tiles.
        src_max_col: largest column index of the original non-overlapping 32nm tiles.
        tile_size: side length of each original 32nm small tile (width == height).
        row_overlap: row-direction overlap (in tiles) used when combining.
        col_overlap: column-direction overlap (in tiles) used when combining.

    Naming convention below: `s_*` indices address the grid of Small output
    tiles; `l_*` indices address tile-sized blocks inside the current Large image.
    """
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    refine_img_dir = file_utils.get_abs_dir(refine_img_dir)
    dst_img_dir = file_utils.create_dir(dst_img_dir)

    img_paths = [os.path.join(refine_img_dir, f) for f in os.listdir(refine_img_dir) if f.rsplit(".", 1)[1].lower() in const.IMG_TYPE_LIST]
    img_name_prefix = os.path.basename(img_paths[0]).split("_tr")[0]
    img_type = os.path.basename(img_paths[0]).rsplit(".", 1)[1]
    l_img_shape = cv2.imread(img_paths[0], 0).shape  # shape of each large tile; must be identical for all (asserted per image below)
    # Extract row/col info from the file names with a regex.
    pattern = re.compile(r'tr(\d+)-tc(\d+)')
    row_col_dict = file_utils.get_row_col_info(img_paths, pattern)
    min_row, max_row, min_col, max_col = row_col_dict["min_row"], row_col_dict["max_row"], row_col_dict["min_col"], row_col_dict["max_col"]
    assert (max_row - min_row + 1) * (max_col - min_col + 1) == len(img_paths)  # the folder must hold a complete grid
    assert min_row == src_min_row and min_col == src_min_col  # earlier steps influence the generated names, so double-check

    s_tile_row_num = int(l_img_shape[0] / tile_size)  # how many small tiles one large tile spans vertically
    s_tile_col_num = int(l_img_shape[1] / tile_size)  # how many small tiles one large tile spans horizontally

    for row in range(min_row, max_row + 1):
        # print(f"row: {row}")
        if row == min_row:
            # First large row: keep everything except half the overlap at the bottom edge.
            s_start_row = min_row
            s_end_row = s_start_row + s_tile_row_num - min_row - row_overlap // 2
            l_start_row = 0  # block-row inside the large image where copying starts
            l_end_row = s_tile_row_num - row_overlap // 2 - 1 # last block-row to copy (this is the block's start offset; add tile_size for its end)
        elif row < max_row:
            # Middle large rows: trim half the overlap from both the top and bottom edges.
            s_start_row = s_end_row + 1
            s_end_row = s_start_row + s_tile_row_num - min_row - (row_overlap // 2) * 2
            l_start_row = row_overlap // 2
            l_end_row = s_tile_row_num - row_overlap // 2 - 1
        else:
            # Last large row: anchor on the bottom edge and derive the start from it.
            s_start_row = s_end_row + 1
            s_end_row = min(src_max_row, s_start_row + s_tile_row_num - min_row - row_overlap // 2)
            l_end_row = s_tile_row_num - 1  # start offset of the final block; add tile_size for its end
            l_start_row = l_end_row - (s_end_row - s_start_row)  # derive the start block from the end block

        for col in range(min_col, max_col + 1):
            # print(f"col: {col}")
            tmp_img_path = os.path.join(refine_img_dir, f"{img_name_prefix}_tr{row}-tc{col}.{img_type}")
            tmp_img = cv2.imread(tmp_img_path, 0)
            assert tmp_img.shape == l_img_shape

            if col == min_col:
                # First large column: keep everything except half the overlap at the right edge.
                s_start_col = min_col
                s_end_col = s_start_col + s_tile_col_num - min_col - col_overlap // 2
                l_start_col = 0
                l_end_col = s_tile_col_num - col_overlap // 2 - 1
            elif col < max_col:
                # Middle large columns: trim half the overlap from both the left and right edges.
                s_start_col = s_end_col + 1  # continue where the previous column stopped
                s_end_col = s_start_col + s_tile_col_num - min_col - (col_overlap // 2) * 2
                l_start_col = col_overlap // 2
                l_end_col = s_tile_col_num - col_overlap // 2 - 1
            else:
                # Last large column: anchor on the right edge and derive the start from it.
                s_start_col = s_end_col + 1
                s_end_col = min(src_max_col, s_start_col + s_tile_col_num - min_col - col_overlap // 2)
                l_end_col = s_tile_col_num - 1  # start offset of the final block; add tile_size for its end
                l_start_col = l_end_col - (s_end_col - s_start_col)  # derive the start block from the end block

            # The span in the small-tile grid must match the span inside the large image.
            assert s_end_row - s_start_row == l_end_row - l_start_row and s_end_col - s_start_col == l_end_col - l_start_col
            for i in range(l_end_row - l_start_row + 1):
                for j in range(l_end_col - l_start_col + 1):
                    img = tmp_img[(l_start_row + i) * tile_size:(l_start_row + i + 1) * tile_size, (l_start_col + j) * tile_size:(l_start_col + j + 1) * tile_size]
                    img_name = f"{img_name_prefix}_tr{s_start_row + i}-tc{s_start_col + j}.{img_type}"
                    # print(img_name)  # when debugging, print the names first to verify they match expectations
                    cv2.imwrite(os.path.join(dst_img_dir, img_name), img)
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def split_tiles(src_dir, dst_dir, split_size):
    """Cut every tile in src_dir into split_size x split_size sub-tiles and save
    them to dst_dir, re-numbering rows/cols from 1 in the finer grid."""
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    img_paths = [os.path.join(src_dir, f) for f in os.listdir(src_dir) if f.rsplit(".", 1)[1].lower() in const.IMG_TYPE_LIST]
    img_name_prefix = os.path.basename(img_paths[0]).split("_tr")[0]
    img_type = os.path.basename(img_paths[0]).rsplit(".", 1)[1]
    img_shape = cv2.imread(img_paths[0], 0).shape  # every large tile must share this shape
    # Extract row/col info from the file names with a regex.
    pattern = re.compile(r'tr(\d+)-tc(\d+)')
    row_col_dict = file_utils.get_row_col_info(img_paths, pattern)
    min_row, max_row, min_col, max_col = row_col_dict["min_row"], row_col_dict["max_row"], row_col_dict["min_col"], row_col_dict["max_col"]
    assert (max_row - min_row + 1) * (max_col - min_col + 1) == len(img_paths)  # the folder must hold a complete grid

    # The tile dimensions must divide evenly into split_size pieces.
    assert img_shape[0] % split_size == 0 and img_shape[1] % split_size == 0
    rows_per_tile = img_shape[0] // split_size
    cols_per_tile = img_shape[1] // split_size

    for path in img_paths:
        img = cv2.imread(path, 0)
        src_row, src_col = (int(v) for v in pattern.findall(path)[0])
        for r in range(rows_per_tile):
            y0 = r * split_size
            for c in range(cols_per_tile):
                x0 = c * split_size
                piece = img[y0:y0 + split_size, x0:x0 + split_size]
                # 1-based indices in the finer grid.
                out_row = (src_row - min_row) * rows_per_tile + r + 1
                out_col = (src_col - min_col) * cols_per_tile + c + 1
                out_path = os.path.join(dst_dir, f"{img_name_prefix}_tr{out_row}-tc{out_col}.{img_type}")
                cv2.imwrite(out_path, piece)
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def main():
    """Command-line entry point."""
    arg_parser = argparse.ArgumentParser(description="This is a auxiliary tool.")
    arg_parser.add_argument("-i", "--in_dir", type=str, default="/media/hqjin/Elements/em_data", help="source dir")
    arg_parser.add_argument("-o", "--out_dir", type=str, default="/media/hqjin/Elements/em_data", help="output dir")
    arg_parser.add_argument("-p", "--process_num", type=int, default=16, help="the number of processes to use(default: 16)")
    args = arg_parser.parse_args()

    # NOTE(review): the parsed args are not used below — the call runs with
    # hard-coded paths. Wire in args.in_dir / args.out_dir / args.process_num
    # when this tool should be driven from the command line.
    split_tiles("/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer52/out/align/refine/11867_32nm_refine",
                "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer52/out/align/refine/11867_32nm_refine_split", 256)


if __name__ == '__main__':
    # Entry point intentionally disabled; uncomment main() to run the tool.
    # main()
    pass
