import os
import sys
sys.path.append(".")
import cv2
import argparse
import time
import numpy as np
from multiprocessing import Pool
import re
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
import gc
import inspect  # used to fetch the current function's name for log messages
import glob
import constant as const
from tools import file_utils
# from tools import img_utils  # imported lazily where it is actually used to avoid a circular-import "No module" error


def cal_weight_matrix(w, h):
    """Build an (h, w) weight matrix with values in [0, 1]: 0 at the top-left
    corner, 1 at the bottom-right, growing towards the bottom-right."""
    col_weights = np.linspace(0, 1, w)
    row_weights = np.linspace(0, 1, h)
    # outer(row, col)[i, j] == row[i] * col[j], identical to the meshgrid product.
    return np.outer(row_weights, col_weights)


def get_four_part_weight_matrices(w, h):
    """Return the four corner weight matrices keyed "lt", "rt", "lb", "rb".

    At every pixel the four (h, w, 1) matrices sum to exactly 1, so they can
    blend four overlapping quadrant flows into a seamless field (weight peaks
    where the quadrants meet). The trailing singleton axis lets each matrix
    broadcast against an (h, w, c) flow array."""
    base = cal_weight_matrix(w, h)  # 0 at top-left, 1 at bottom-right

    quadrants = {
        "lt": base,                        # left-top
        "rt": np.fliplr(base),             # right-top
        "lb": np.flipud(base),             # left-bottom
        "rb": np.flipud(np.fliplr(base)),  # right-bottom
    }
    # Add the trailing channel axis so the matrices broadcast over flow shapes.
    return {name: mat[:, :, np.newaxis] for name, mat in quadrants.items()}


# Precomputed weight sets for the two tile sizes supported by combine_flow().
WEIGHT_128_128 = get_four_part_weight_matrices(128, 128)
WEIGHT_256_256 = get_four_part_weight_matrices(256, 256)


def combine_flow(src_dir, weight_size):
    """Blend the 4-quadrant flow tiles in src_dir into one large flow field.

    Every trR-tcC.npy file in src_dir is split into four half-size quadrants
    (lt/rt/lb/rb). Each quadrant is multiplied by a corner weight matrix and
    accumulated into the output tile it overlaps; the accumulated weights are
    then used to normalize each output tile, and all tiles are pasted into one
    big np.float32 array.

    Args:
        src_dir: directory containing a complete grid of trR-tcC.npy files.
        weight_size: side length of the quadrant weight matrices; only 128 and
            256 are supported (presumably must equal half the tile size --
            TODO confirm against the caller).

    Returns:
        np.float32 array of shape
        ((max_row - min_row + 2) * half_h, (max_col - min_col + 2) * half_w, c).
    """
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    src_dir = file_utils.get_abs_dir(src_dir)

    files = [os.path.join(src_dir, f) for f in os.listdir(src_dir) if f.rsplit(".", 1)[1].lower() == "npy"]
    # Extract row/col indices from the trR-tcC file names.
    pattern = re.compile(r'tr(\d+)-tc(\d+)')
    row_col_dict = file_utils.get_row_col_info(files, pattern)
    min_row, max_row, min_col, max_col = row_col_dict["min_row"], row_col_dict["max_row"], row_col_dict["min_col"], row_col_dict["max_col"]
    assert (max_row - min_row + 1) * (max_col - min_col + 1) == len(files)  # the directory must hold a complete grid

    shape = np.load(files[0]).shape
    half_h, half_w = shape[0] // 2, shape[1] // 2

    if weight_size == 128:
        weight_4_part = WEIGHT_128_128
    elif weight_size == 256:
        weight_4_part = WEIGHT_256_256
    else:
        raise Exception("Error! weight_size only support 128, 256 now!")

    tile_flow_dict = defaultdict(lambda: defaultdict(dict))  # (row, col) -> {"flow": ..., "weight": ...}; the inner empty dict marks "not set yet"
    for f in files:
        row_col = pattern.findall(f)[0]
        flow = np.load(f)
        assert flow.shape == shape
        # Split each file into its lt, rt, lb, rb quadrants, pre-weighted.
        lt_flow = flow[:half_h, :half_w] * weight_4_part["lt"]
        rt_flow = flow[:half_h, half_w:] * weight_4_part["rt"]
        lb_flow = flow[half_h:, :half_w] * weight_4_part["lb"]
        rb_flow = flow[half_h:, half_w:] * weight_4_part["rb"]
        lt_key = (int(row_col[0]), int(row_col[1]))  # output tile receiving this file's left-top quadrant
        # First contribution initializes the entry; later ones accumulate
        # (len(...) == 0 detects the untouched inner dict).
        tile_flow_dict[lt_key]["flow"] = lt_flow if len(tile_flow_dict[lt_key]["flow"]) == 0 else tile_flow_dict[lt_key]["flow"] + lt_flow
        tile_flow_dict[lt_key]["weight"] = weight_4_part["lt"] if len(tile_flow_dict[lt_key]["weight"]) == 0 else tile_flow_dict[lt_key]["weight"] + weight_4_part["lt"]

        rt_key = (int(row_col[0]), int(row_col[1]) + 1)  # output tile receiving the right-top quadrant
        tile_flow_dict[rt_key]["flow"] = rt_flow if len(tile_flow_dict[rt_key]["flow"]) == 0 else tile_flow_dict[rt_key]["flow"] + rt_flow
        tile_flow_dict[rt_key]["weight"] = weight_4_part["rt"] if len(tile_flow_dict[rt_key]["weight"]) == 0 else tile_flow_dict[rt_key]["weight"] + weight_4_part["rt"]

        lb_key = (int(row_col[0]) + 1, int(row_col[1]))  # output tile receiving the left-bottom quadrant
        tile_flow_dict[lb_key]["flow"] = lb_flow if len(tile_flow_dict[lb_key]["flow"]) == 0 else tile_flow_dict[lb_key]["flow"] + lb_flow
        tile_flow_dict[lb_key]["weight"] = weight_4_part["lb"] if len(tile_flow_dict[lb_key]["weight"]) == 0 else tile_flow_dict[lb_key]["weight"] + weight_4_part["lb"]

        rb_key = (int(row_col[0]) + 1, int(row_col[1]) + 1)  # output tile receiving the right-bottom quadrant
        tile_flow_dict[rb_key]["flow"] = rb_flow if len(tile_flow_dict[rb_key]["flow"]) == 0 else tile_flow_dict[rb_key]["flow"] + rb_flow
        tile_flow_dict[rb_key]["weight"] = weight_4_part["rb"] if len(tile_flow_dict[rb_key]["weight"]) == 0 else tile_flow_dict[rb_key]["weight"] + weight_4_part["rb"]

    # Normalize each accumulated tile by its accumulated weight.
    for k, v in tile_flow_dict.items():
        flow, weight = v["flow"], v["weight"]
        tile_flow_dict[k]["flow"] = flow / (weight + 1e-10)  # epsilon avoids division by ~0 (NaN) at the outer edges

    # The stitched field has max_row + 1 - min_row + 1 tile rows and
    # max_col + 1 - min_col + 1 tile cols, each tile of shape (half_h, half_w).
    total_flow = np.zeros(((max_row + 1 - min_row + 1) * half_h, (max_col + 1 - min_col + 1) * half_w, shape[2]), np.float32)
    for k, v in tile_flow_dict.items():
        y0, x0 = (k[0] - min_row) * half_h, (k[1] - min_col) * half_w
        total_flow[y0:y0 + half_h, x0:x0 + half_w, :] = v["flow"]
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    return total_flow


def combine_flows_to_larger_flows_with_overlap(src_dir, dst_dir, flow_shape, start_row, start_col, subgrid_row_num, subgrid_col_num, row_overlap, col_overlap):
    """Stitch one subgrid of small flow tiles into a single larger flow file.

    Worker for batch_combine_flows_to_larger_flows_with_overlap(); each process
    handles one subgrid for speed. Copies the subgrid_row_num x subgrid_col_num
    tiles starting at tile (start_row, start_col) into one array and saves it
    as trR-tcC.npy, where R/C are the subgrid's 1-based indices in the
    overlapping subgrid lattice (stride = subgrid_*_num - *_overlap).
    """
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    flow_combined = np.zeros((subgrid_row_num * flow_shape[0], subgrid_col_num * flow_shape[1], flow_shape[2]), np.float32)
    for i in range(start_row, start_row + subgrid_row_num):
        for j in range(start_col, start_col + subgrid_col_num):
            tmp_flow_path = os.path.join(src_dir, f"tr{i}-tc{j}.npy")
            tmp_flow = np.load(tmp_flow_path, mmap_mode='r')  # mmap: only the copied region is actually read
            assert tmp_flow.shape == flow_shape
            y0 = (i - start_row) * flow_shape[0]
            x0 = (j - start_col) * flow_shape[1]
            flow_combined[y0:y0 + flow_shape[0], x0:x0 + flow_shape[1], :] = tmp_flow
    # Derive this subgrid's lattice indices from its starting tile position.
    flow_combined_path = os.path.join(dst_dir, "tr{}-tc{}.npy".format(int(np.ceil((start_row - 1) / (subgrid_row_num - row_overlap))) + 1,
                                                                      int(np.ceil((start_col - 1) / (subgrid_col_num - col_overlap))) + 1))
    np.save(flow_combined_path, flow_combined)


def batch_combine_flows_to_larger_flows_with_overlap(src_dir, dst_dir, process_num=1):
    """Merge the small flow tiles in src_dir into larger, mutually overlapping
    flow fields saved under dst_dir, one subgrid per worker process.

    Subgrid layout comes from const: SUBGRID_ROW_NUM x SUBGRID_COL_NUM tiles
    per big flow, with ROW_OVERLAP / COL_OVERLAP tiles shared between
    neighboring big flows. process_num < 0 means "use os.cpu_count()".
    """
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    files = [os.path.join(src_dir, f) for f in os.listdir(src_dir) if f.rsplit(".", 1)[1].lower() == "npy"]
    # Extract row/col indices from the trR-tcC file names.
    pattern = re.compile(r'tr(\d+)-tc(\d+)')
    row_col_dict = file_utils.get_row_col_info(files, pattern)
    min_row, max_row, min_col, max_col = row_col_dict["min_row"], row_col_dict["max_row"], row_col_dict["min_col"], row_col_dict["max_col"]
    assert (max_row - min_row + 1) * (max_col - min_col + 1) == len(files)  # the directory must hold a complete grid

    subgrid_row_num, subgrid_col_num = const.SUBGRID_ROW_NUM, const.SUBGRID_COL_NUM  # e.g. merge 7*7 small flows into one big flow
    row_overlap, col_overlap = const.ROW_OVERLAP, const.COL_OVERLAP  # overlap (in tiles) between neighboring big flows

    flow_shape = np.load(files[0]).shape  # all flows assumed to share one shape (asserted in the worker)
    subgrid_ids = set()  # records each big flow's starting (row, col); the end-of-row/col clamping below can produce duplicates, which the set skips
    for subgrid_row in range(min_row, max_row + 1, subgrid_row_num - row_overlap):
        if subgrid_row + subgrid_row_num - 1 > max_row:
            # Clamp the last subgrid so it ends exactly at max_row. Reassigning
            # the loop variable only affects this iteration, not the iterator.
            subgrid_row = max_row - subgrid_row_num + 1

        for subgrid_col in range(min_col, max_col + 1, subgrid_col_num - col_overlap):
            if subgrid_col + subgrid_col_num - 1 > max_col:
                subgrid_col = max_col - subgrid_col_num + 1

            subgrid_id = (subgrid_row, subgrid_col)
            if subgrid_id in subgrid_ids:
                continue
            subgrid_ids.add(subgrid_id)

    process_num = os.cpu_count() if process_num < 0 else process_num
    process_num = min(process_num, len(subgrid_ids))  # never spawn more workers than there are subgrids
    print("process_num: ", process_num)

    pool = Pool(processes=process_num)
    res_l = []
    for id in subgrid_ids:  # NOTE: `id` shadows the builtin; harmless here but worth renaming
        res = pool.apply_async(combine_flows_to_larger_flows_with_overlap, (src_dir, dst_dir, flow_shape, id[0], id[1],
                                                                            subgrid_row_num, subgrid_col_num, row_overlap, col_overlap))
        res_l.append(res)

    pool.close()
    pool.join()

    for i in res_l:
        res = i.get()  # get() re-raises any exception that occurred in a worker
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def get_small_flows_from_combine_flows(big_flow_dir, small_flow_dir, src_min_row, src_max_row, src_min_col, src_max_col,
                                       tile_size, row_overlap=const.ROW_OVERLAP, col_overlap=const.COL_OVERLAP):
    """Split the overlapping 32nm big flow fields back into the original small
    tiles; the inverse of batch_combine_flows_to_larger_flows_with_overlap().

    Args:
        big_flow_dir: directory of the overlapping 32nm big flow fields.
        small_flow_dir: output directory for the non-overlapping small flows.
        src_min_row: minimum row index of the original non-overlapping 32nm grid.
        src_max_row: maximum row index of the original non-overlapping 32nm grid.
        src_min_col: minimum col index of the original non-overlapping 32nm grid.
        src_max_col: maximum col index of the original non-overlapping 32nm grid.
        tile_size: side length of each original small flow tile (square).
        row_overlap: row-direction overlap (in tiles) used when building the big flows.
        col_overlap: col-direction overlap (in tiles) used when building the big flows.

    Bug fix: the first-column branch previously tested `col == min_row` instead
    of `col == min_col`, so whenever min_row != min_col the column bookkeeping
    variables were never initialized on the first column (NameError) or were
    initialized on the wrong column.

    NOTE(review): the "- min_row" / "- min_col" terms in the span formulas make
    the span assert below hold only when min_row == min_col == 1 -- confirm
    before reusing with other grid origins.
    """
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    big_flow_dir = file_utils.get_abs_dir(big_flow_dir)
    small_flow_dir = file_utils.create_dir(small_flow_dir)

    big_flow_paths = [os.path.join(big_flow_dir, f) for f in os.listdir(big_flow_dir) if f.rsplit(".", 1)[1].lower() == "npy"]
    name_prefix = os.path.basename(big_flow_paths[0]).split("tr")[0]
    big_flow_shape = np.load(big_flow_paths[0]).shape  # all big flows must share this shape (asserted below)
    # Extract row/col indices from the trR-tcC file names.
    pattern = re.compile(r'tr(\d+)-tc(\d+)')
    row_col_dict = file_utils.get_row_col_info(big_flow_paths, pattern)
    min_row, max_row, min_col, max_col = row_col_dict["min_row"], row_col_dict["max_row"], row_col_dict["min_col"], row_col_dict["max_col"]
    assert (max_row - min_row + 1) * (max_col - min_col + 1) == len(big_flow_paths)  # the directory must hold a complete grid
    assert min_row == src_min_row and min_col == src_min_col  # earlier steps influence the generated names, so double-check

    s_tile_row_num = int(big_flow_shape[0] / tile_size)  # how many small tiles tall each big flow is
    s_tile_col_num = int(big_flow_shape[1] / tile_size)  # how many small tiles wide each big flow is

    for row in range(min_row, max_row + 1):
        # print(f"row: {row}")
        if row == min_row:
            s_start_row = min_row
            s_end_row = s_start_row + s_tile_row_num - min_row - row_overlap // 2
            l_start_row = 0  # first source tile row to copy out of the big flow
            l_end_row = s_tile_row_num - row_overlap // 2 - 1  # last source tile row to copy (tile start index; add tile_size for its end)
        elif row < max_row:
            s_start_row = s_end_row + 1
            s_end_row = s_start_row + s_tile_row_num - min_row - (row_overlap // 2) * 2
            l_start_row = row_overlap // 2
            l_end_row = s_tile_row_num - row_overlap // 2 - 1
        else:
            s_start_row = s_end_row + 1
            s_end_row = min(src_max_row, s_start_row + s_tile_row_num - min_row - row_overlap // 2)
            l_end_row = s_tile_row_num - 1  # last tile's start index; add tile_size for its end
            l_start_row = l_end_row - (s_end_row - s_start_row)  # derive the first tile from the last one

        for col in range(min_col, max_col + 1):
            # print(f"col: {col}")
            tmp_flow_path = os.path.join(big_flow_dir, f"{name_prefix}tr{row}-tc{col}.npy")
            tmp_flow = np.load(tmp_flow_path)
            assert tmp_flow.shape == big_flow_shape

            if col == min_col:  # fixed: was `col == min_row`
                s_start_col = min_col
                s_end_col = s_start_col + s_tile_col_num - min_col - col_overlap // 2
                l_start_col = 0
                l_end_col = s_tile_col_num - col_overlap // 2 - 1
            elif col < max_col:
                s_start_col = s_end_col + 1  # continue from the previous column block
                s_end_col = s_start_col + s_tile_col_num - min_col - (col_overlap // 2) * 2
                l_start_col = col_overlap // 2
                l_end_col = s_tile_col_num - col_overlap // 2 - 1
            else:
                s_start_col = s_end_col + 1
                s_end_col = min(src_max_col, s_start_col + s_tile_col_num - min_col - col_overlap // 2)
                l_end_col = s_tile_col_num - 1  # last tile's start index; add tile_size for its end
                l_start_col = l_end_col - (s_end_col - s_start_col)  # derive the first tile from the last one

            assert s_end_row - s_start_row == l_end_row - l_start_row and s_end_col - s_start_col == l_end_col - l_start_col
            for i in range(l_end_row - l_start_row + 1):
                for j in range(l_end_col - l_start_col + 1):
                    flow = tmp_flow[(l_start_row + i) * tile_size:(l_start_row + i + 1) * tile_size, (l_start_col + j) * tile_size:(l_start_col + j + 1) * tile_size]
                    flow_name = f"{name_prefix}tr{s_start_row + i}-tc{s_start_col + j}.npy"
                    # print(flow_name)  # when debugging, print the name first to verify it looks right
                    np.save(os.path.join(small_flow_dir, flow_name), flow)
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def upsample_flow(flow, scale, interpolation=cv2.INTER_CUBIC):
    """Resize a 2-channel flow field by `scale`.

    Both the spatial resolution and the displacement values are multiplied by
    `scale`, so the flow remains valid at the new size."""
    src_h, src_w = flow.shape[:2]
    dst_size = (int(src_w * scale), int(src_h * scale))  # cv2.resize expects (w, h)
    resized = [cv2.resize(flow[:, :, c], dst_size, interpolation=interpolation) * scale
               for c in range(2)]
    return np.stack(resized, axis=2)


def upsample_flow_read_save(flow_file, dst_dir, scale, interpolation=cv2.INTER_CUBIC):
    """Load one .npy flow, upsample it by `scale`, and save it under dst_dir
    with the same base name."""
    out_path = os.path.join(dst_dir, os.path.basename(flow_file))
    src_flow = np.load(flow_file)
    upsampled = upsample_flow(src_flow, scale, interpolation)
    del src_flow  # drop the source array before writing to keep peak memory down
    np.save(out_path, upsampled)


def batch_upsample_flows(src_dir, dst_dir, scale, process_num=50):
    """Upsample every .npy flow in src_dir by `scale` into dst_dir, fanning the
    per-file work out over a thread pool of `process_num` workers."""
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    src_dir = file_utils.get_abs_dir(src_dir)
    dst_dir = file_utils.create_dir(dst_dir)

    npy_paths = [os.path.join(src_dir, name) for name in os.listdir(src_dir) if name.endswith('.npy')]
    with ThreadPoolExecutor(max_workers=process_num) as executor:
        pending = [executor.submit(upsample_flow_read_save, path, dst_dir, scale) for path in npy_paths]
        for task in pending:
            task.result()  # re-raise any exception from a worker
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def split_flow(flow, tile_size, out_dir):
    """Split `flow` into tile_size x tile_size tiles and save each one to
    out_dir as trR-tcC.npy, with R/C starting at 1 (tr1-tc1)."""
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    out_dir = file_utils.create_dir(out_dir)

    height, width = flow.shape[:2]
    for row_idx, y0 in enumerate(range(0, height, tile_size), start=1):
        for col_idx, x0 in enumerate(range(0, width, tile_size), start=1):
            tile = flow[y0:y0 + tile_size, x0:x0 + tile_size, :]
            np.save(os.path.join(out_dir, "tr{}-tc{}.npy".format(row_idx, col_idx)), tile)
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def extend_flow(flow, extend_size, border_type=None, value=0):
    """Pad a flow field by `extend_size` pixels on each side with edge replication.

    Bug fix: the previous implementation called
    `cv2.copyMakeBorder(flow, ..., border_type, value)` with `value` passed
    positionally, which the cv2 Python binding interprets as the `dst`
    argument (the signature is `copyMakeBorder(src, top, bottom, left, right,
    borderType[, dst[, value]])`), so the call failed with a scalar `value`.
    Moreover, every padded pixel was then overwritten with edge replication
    (top/bottom rows, left/right columns, and the four corners), so
    `border_type` and `value` never influenced the result. The whole routine
    is therefore equivalent to a single edge-mode pad; the two parameters are
    retained only for call-site compatibility and are ignored.

    Args:
        flow: (h, w, c) array.
        extend_size: pad width in pixels on each of the four sides.
        border_type: unused, kept for backward compatibility.
        value: unused, kept for backward compatibility.

    Returns:
        (h + 2*extend_size, w + 2*extend_size, c) array of the same dtype.
    """
    pad_widths = ((extend_size, extend_size), (extend_size, extend_size), (0, 0))
    return np.pad(flow, pad_widths, mode="edge")


def apply_flow_to_img(img, flow):
    """Warp `img` with the dense flow field `flow` via cv2.remap.

    Output pixel (y, x) is sampled from source position
    (x + flow[y, x, 0], y + flow[y, x, 1])."""
    assert img.shape[:2] == flow.shape[:2]
    h, w = img.shape[:2]
    col_idx = np.arange(w)
    row_idx = np.arange(h)[:, np.newaxis]
    # Absolute sampling map = pixel grid plus the flow displacements.
    flow_map = np.zeros((h, w, 2), np.float32)
    flow_map[..., 0] = flow[..., 0] + col_idx
    flow_map[..., 1] = flow[..., 1] + row_idx
    return cv2.remap(img, flow_map, None, cv2.INTER_CUBIC)


def process_flow_to_img(src_img_dir, flow_dir, dst_img_dir, extend_size=512):
    """Warp every image tile in src_img_dir with its matching flow field from
    flow_dir and save the result under dst_img_dir.

    Image tiles are named {prefix}_trR-tcC.{ext} and flows trR-tcC.npy; both
    grids must cover exactly the same row/col range.

    NOTE(review): extend_size is currently unused -- the border-extension step
    below is commented out; confirm before relying on it.
    """
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    from tools import img_utils  # lazy import to avoid a circular import
    src_img_dir = file_utils.get_abs_dir(src_img_dir)
    flow_dir = file_utils.get_abs_dir(flow_dir)
    dst_img_dir = file_utils.create_dir(dst_img_dir)

    # Extract row/col indices from the image file names.
    pattern = re.compile(r'tr(\d+)-tc(\d+)')
    img_files = [os.path.join(src_img_dir, f) for f in os.listdir(src_img_dir) if f.rsplit(".", 1)[1].lower() in const.IMG_TYPE_LIST]
    img_row_col_dict = file_utils.get_row_col_info(img_files, pattern)
    img_min_row, img_max_row, img_min_col, img_max_col = img_row_col_dict["min_row"], img_row_col_dict["max_row"], img_row_col_dict["min_col"], img_row_col_dict["max_col"]
    assert (img_max_row - img_min_row + 1) * (img_max_col - img_min_col + 1) == len(img_files)  # the directory must hold a complete grid

    flow_files = [os.path.join(flow_dir, f) for f in os.listdir(flow_dir) if f.rsplit(".", 1)[1].lower() == "npy"]
    # Extract row/col indices from the flow file names.
    flow_row_col_dict = file_utils.get_row_col_info(flow_files, pattern)
    flow_min_row, flow_max_row, flow_min_col, flow_max_col = flow_row_col_dict["min_row"], flow_row_col_dict["max_row"], flow_row_col_dict["min_col"], flow_row_col_dict["max_col"]
    assert (flow_max_row - flow_min_row + 1) * (flow_max_col - flow_min_col + 1) == len(flow_files)  # the directory must hold a complete grid

    assert img_min_row == flow_min_row and img_max_row == flow_max_row and img_min_col == flow_min_col and img_max_col == flow_max_col  # both grids must align
    img_name_prefix = os.path.basename(img_files[0]).split("_tr")[0]
    img_type = os.path.basename(img_files[0]).rsplit(".", 1)[1]

    for row in range(img_min_row, img_max_row + 1):
        for col in range(img_min_col, img_max_col + 1):
            src_img = cv2.imread(os.path.join(src_img_dir, f"{img_name_prefix}_tr{row}-tc{col}.{img_type}"), 0)  # grayscale read
            flow = np.load(os.path.join(flow_dir, f"tr{row}-tc{col}.npy"))

            # # Extend the borders (disabled)
            # src_img = img_utils.extend_img(src_img, extend_size, cv2.BORDER_CONSTANT, 0)
            # flow = extend_flow(flow, extend_size, cv2.BORDER_CONSTANT, 0)
            # Apply the flow field
            dst_img = apply_flow_to_img(src_img, flow)
            cv2.imwrite(os.path.join(dst_img_dir, f"{img_name_prefix}_tr{row}-tc{col}.{img_type}"), dst_img)
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))


def composite_flow(flow1, flow2):
    """Compose two flow fields: applying flow1 first and then flow2 to an image
    equals applying the returned comp_flow once.

    comp_flow(p) = flow2(p) + flow1(p + flow2(p)): flow1 is resampled at the
    positions flow2 points to, then the two displacements are summed.

    Fix: the original also built a `flow1_map` absolute-coordinate grid that
    was filled but never read; that dead computation has been removed.
    """
    assert flow1.shape == flow2.shape
    h, w, _ = flow1.shape

    # Absolute sampling map for flow2 (pixel grid + displacement).
    flow2_map = np.zeros((h, w, 2), dtype=np.float32)
    flow2_map[..., 0] = flow2[..., 0] + np.arange(w)
    flow2_map[..., 1] = flow2[..., 1] + np.arange(h)[:, np.newaxis]

    # Resample flow1 at the locations flow2 points to.
    flow1_remap_x = cv2.remap(flow1[..., 0], flow2_map[..., 0], flow2_map[..., 1], interpolation=cv2.INTER_CUBIC)
    flow1_remap_y = cv2.remap(flow1[..., 1], flow2_map[..., 0], flow2_map[..., 1], interpolation=cv2.INTER_CUBIC)

    # Sum the displacements to form the composite field.
    comp_flow = np.zeros_like(flow1)
    comp_flow[..., 0] = flow2[..., 0] + flow1_remap_x
    comp_flow[..., 1] = flow2[..., 1] + flow1_remap_y
    return comp_flow


def composite_flows(flow_dir1, flow_dir2, comp_dir):
    """For every file name present in both flow_dir1 and flow_dir2 (the two
    directories must contain exactly the same .npy names), compose the pair of
    flows and save the result under comp_dir with the same name."""
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    flow_dir1 = file_utils.get_abs_dir(flow_dir1)
    flow_dir2 = file_utils.get_abs_dir(flow_dir2)
    comp_dir = file_utils.create_dir(comp_dir)

    names1 = sorted(f for f in os.listdir(flow_dir1) if f.endswith('.npy'))
    names2 = sorted(f for f in os.listdir(flow_dir2) if f.endswith('.npy'))
    common_names = set(names1) & set(names2)
    # Same count and same names in both directories.
    assert len(names1) == len(names2) and len(names1) == len(common_names)

    for name in common_names:
        first = np.load(os.path.join(flow_dir1, name))
        second = np.load(os.path.join(flow_dir2, name))
        np.save(os.path.join(comp_dir, name), composite_flow(first, second))
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))

def get_max_min_uv(flow_file):
    """Load one .npy flow and report the extremes of its u and v channels.

    Returns a dict holding the file path plus, for each of min_u / max_u /
    min_v / max_v, the extreme value and its (row, col) coordinate within the
    flow. Returns None (after printing) when loading fails, so workers in a
    Pool.map never raise."""
    try:
        flow = np.load(flow_file)
        result = {"flow_path": flow_file}
        for comp_name, channel in (("u", flow[..., 0]), ("v", flow[..., 1])):
            for tag, pick_value, pick_index in (("min", np.min, np.argmin), ("max", np.max, np.argmax)):
                flat_idx = pick_index(channel)  # argmin/argmax index into the flattened array
                result[f"{tag}_{comp_name}"] = {
                    "value": pick_value(channel),
                    # unravel_index converts the flat index back to (row, col).
                    "coord": np.unravel_index(flat_idx, channel.shape),
                }
        return result
    except Exception as e:
        print(f"Load {flow_file} error: {e}")
        return None

    
def _print_real_ng_coord(label, stats, row_offset, col_offset, tile_size):
    """Print the global (y, x) pixel coordinate of one extreme value: decode
    the tile row/col from the trR-tcC file name, shift by the grid-origin tile
    indices, scale by the tile size, then add the in-tile coordinate."""
    tile_row = int(stats["file"].split("tr")[1].split("-")[0])
    tile_col = int(stats["file"].split("tc")[1].split(".")[0])
    y = (tile_row - row_offset) * tile_size + stats["coord"][0]
    x = (tile_col - col_offset) * tile_size + stats["coord"][1]
    print("{} real NG coord(y, x): ({}, {})".format(label, y, x))


def get_flow_range(src_dir, start_row_idx, end_row_idx, start_col_idx, end_col_idx, process_num=-1,
                   row_offset=12, col_offset=15, tile_size=4096):
    """Report the global min/max of the u and v components over a window of
    trR-tcC.npy flow tiles, printing each extreme and its global coordinate.

    Args:
        src_dir: directory holding a complete grid of trR-tcC.npy flow tiles.
        start_row_idx, end_row_idx: inclusive row window; end_row_idx == -1
            means "up to the max row present". Values are clamped to range.
        start_col_idx, end_col_idx: same for columns.
        process_num: worker processes; -1 means os.cpu_count().
        row_offset, col_offset: tile indices of the grid origin, used when
            converting a tile-local extreme coordinate to a global pixel
            coordinate (generalized from the previously hard-coded 12 / 15).
        tile_size: pixel side length of one tile (previously hard-coded 4096).
    """
    src_dir = file_utils.get_abs_dir(src_dir)
    flow_path_list = glob.glob(os.path.join(src_dir, '*.npy'))

    pattern = re.compile(r"tr(\d+)-tc(\d+)")
    row_col_dict = file_utils.get_row_col_info(flow_path_list, pattern)
    min_row, max_row, min_col, max_col = row_col_dict["min_row"], row_col_dict["max_row"], row_col_dict["min_col"], row_col_dict["max_col"]
    assert (max_row - min_row + 1) * (max_col - min_col + 1) == len(flow_path_list)  # the directory must hold a complete grid
    # Clamp the requested window to the rows/cols actually present.
    start_row_idx = min(max(start_row_idx, min_row), max_row)
    end_row_idx = max_row if end_row_idx == -1 else max(min(end_row_idx, max_row), min_row)
    start_col_idx = min(max(start_col_idx, min_col), max_col)
    end_col_idx = max_col if end_col_idx == -1 else max(min(end_col_idx, max_col), min_col)

    process_flow_path_list = []
    for tmp_flow_path in flow_path_list:
        tmp_info = pattern.findall(tmp_flow_path)[0]
        tmp_row, tmp_col = int(tmp_info[0]), int(tmp_info[1])
        if start_row_idx <= tmp_row <= end_row_idx and start_col_idx <= tmp_col <= end_col_idx:
            process_flow_path_list.append(tmp_flow_path)
    print(f"process flow number: {len(process_flow_path_list)}")

    process_num = min(os.cpu_count(), len(process_flow_path_list)) if process_num == -1 else min(max(process_num, 1), len(process_flow_path_list))

    with Pool(processes=process_num) as pool:
        worker_results = pool.map(get_max_min_uv, process_flow_path_list)

    valid_results = [res for res in worker_results if res is not None]
    if not valid_results:
        print("No valid result.")
        return

    # Track the four global extremes; +/-inf seeds make the first comparison
    # always succeed.
    global_stats = {key: {"value": float("inf") if key.startswith("min") else float("-inf"), "file": None, "coord": None}
                    for key in ("min_u", "max_u", "min_v", "max_v")}
    for res in valid_results:
        for key, stats in global_stats.items():
            is_better = res[key]["value"] < stats["value"] if key.startswith("min") else res[key]["value"] > stats["value"]
            if is_better:
                stats["value"] = res[key]["value"]
                stats["file"] = os.path.basename(res["flow_path"])
                stats["coord"] = res[key]["coord"]

    print(f"src_dir: {src_dir}")
    for key in ("min_u", "max_u", "min_v", "max_v"):
        print(f"global_{key}: {global_stats[key]}")
        _print_real_ng_coord(f"global_{key}", global_stats[key], row_offset, col_offset, tile_size)

def decay_flow(src_file, dst_file, decay_ratio):
    """Scale the flow stored at src_file by decay_ratio and save it to dst_file."""
    scaled = np.load(src_file) * decay_ratio
    np.save(dst_file, scaled)

def decay_flows(src_dir, dst_dir, decay_ratio, process_num=-1):
    """Multiply every .npy flow in src_dir by decay_ratio, writing the results
    under dst_dir with the same file names, one worker process per file batch."""
    src_dir = file_utils.get_abs_dir(src_dir)
    file_utils.create_dir(dst_dir)

    task_args = [(src_path, os.path.join(dst_dir, os.path.basename(src_path)), decay_ratio)
                 for src_path in glob.glob(os.path.join(src_dir, '*.npy'))]
    if not task_args:
        return

    if process_num == -1:
        worker_num = min(os.cpu_count(), len(task_args))
    else:
        worker_num = min(max(process_num, 1), len(task_args))
    # starmap unpacks each tuple into decay_flow's arguments and blocks until
    # every task has completed.
    with Pool(processes=worker_num) as pool:
        pool.starmap(decay_flow, task_args)


def main():
    """CLI entry point. Parses the standard in/out/process arguments (currently
    unused by the active step) and runs flow composition on hard-coded wafer52
    refine paths."""
    parser = argparse.ArgumentParser(description="This is a auxiliary tool.")
    parser.add_argument("-i", "--in_dir", type=str, default="/media/hqjin/Elements/em_data", help="source dir")
    parser.add_argument("-o", "--out_dir", type=str, default="/media/hqjin/Elements/em_data", help="output dir")
    parser.add_argument("-p", "--process_num", type=int, default=16, help="the number of processes to use(default: 16)")
    args = parser.parse_args()

    refine_dir = "/media/hqjin/Elements/em_data/mec/20230321_MEC/wafer52/out/align/refine"
    composite_flows(refine_dir + "/11867_32nm_coarse_flow",
                    refine_dir + "/11867_32nm_fine_flow",
                    refine_dir + "/11867_32nm_comp_flow")


if __name__ == '__main__':
    # Ad-hoc entry point: uncomment the step you want to run.
    # main()
    # get_flow_range("/CX/neuro_segment/user/jinhaiqun/out/mec/align_imgs/refine/03133_8nm_flow", 12, 37, 15, 40)
    # Currently scales the 03133 8nm flows by 0.9 into the 03134 directory.
    decay_flows("/CX/neuro_segment/user/jinhaiqun/out/mec/align_imgs/refine/03133_8nm_flow", "/CX/neuro_segment/user/jinhaiqun/out/mec/align_imgs/refine/03134_8nm_flow", 0.9)