import os
import cv2
import json
import math
import numpy as np
import skimage
import argparse
import shutil
import re
from multiprocessing import Pool
from concurrent.futures import ThreadPoolExecutor
from PIL import Image  # 用cv2读取大图可能会失败，所以需要PIL库，具体用法：src_img = np.array(Image.open(src_img_path))
Image.MAX_IMAGE_PIXELS = None
import file_utils
import img_utils

# Compiled once at module level; the same pattern is used elsewhere in this file.
_ROW_COL_RE = re.compile(r"tr(\d+)-tc(\d+)")

def get_row_col_idx(file_path):
    """Parse the (row, col) tile indices from an image file name.

    The name must follow the ``*_tr<row>-tc<col>.png`` convention, where the
    second field is the row index and the third the column index.

    Args:
        file_path: path or bare file name of a tile image.

    Returns:
        (row_idx, col_idx) as ints.

    Raises:
        Exception: if the name does not contain a ``tr<digits>-tc<digits>`` part.
    """
    file_name = os.path.basename(file_path)
    # A regex search is robust even when the prefix itself contains "tr"
    # (the old split("tr") approach raised on names like "extra_tr1-tc2.png").
    match = _ROW_COL_RE.search(file_name)
    if match is None:
        raise Exception("Error! File name should be like: *_tr1-tc2.png, but this file name is {}".format(file_name))
    return int(match.group(1)), int(match.group(2))

def gen_empty_img_in_dirs(src_dir, empty_img_value=255):
    """Create placeholder PNGs for the missing tiles of a tr/tc grid.

    FEABAS does not save a PNG for empty rendered regions.  This scans the
    tile directory (or each of its sub-directories), determines the full
    row/col range from the existing tile names, and writes a uniform-value
    image for every grid position that has no PNG.  The names of the created
    files are recorded in ``add_empty_img_list.txt`` inside each directory.

    Args:
        src_dir: either a directory holding the tile PNGs directly, or a
            directory of section sub-directories (e.g. 0_s0001, 0_s0002)
            each holding tile PNGs.
        empty_img_value: gray value used to fill the placeholder images.
    """
    src_dir = file_utils.get_abs_dir(src_dir)

    # Two supported layouts:
    # 1) src_dir itself contains the tile PNGs;
    # 2) src_dir contains sub-directories, each full of tile PNGs.
    sub_dirs = [os.path.join(src_dir, f) for f in os.listdir(src_dir)
                if os.path.isdir(os.path.join(src_dir, f))]
    scan_dirs = sub_dirs if sub_dirs else [src_dir]

    min_row, min_col = np.iinfo(np.int32).max, np.iinfo(np.int32).max
    max_row, max_col = 0, 0
    dir_imgs_dict = {}
    for cur_dir in scan_dirs:
        imgs_list = []
        for f in os.listdir(cur_dir):
            # endswith() avoids an IndexError on names without an extension
            # (the old rsplit(".", 1)[1] crashed on such files).
            if "tr" in f and "tc" in f and f.endswith(".png"):
                cur_row, cur_col = get_row_col_idx(f)
                min_row, min_col = min(min_row, cur_row), min(min_col, cur_col)
                max_row, max_col = max(max_row, cur_row), max(max_col, cur_col)
                imgs_list.append(f)
        if imgs_list:
            dir_imgs_dict[cur_dir] = imgs_list

    if not dir_imgs_dict:
        return

    # NOTE(review): the row/col range is accumulated over ALL directories, so
    # every directory is padded to the same global grid -- confirm intended.
    for cur_dir, cur_imgs in dir_imgs_dict.items():
        prefix = cur_imgs[0].split("_")[0]
        # All placeholders copy the shape of the first existing tile.
        shape = cv2.imread(os.path.join(cur_dir, cur_imgs[0]), 0).shape
        existing = set(cur_imgs)  # O(1) membership tests inside the grid loop
        with open(os.path.join(cur_dir, "add_empty_img_list.txt"), 'wt') as f:
            for i in range(min_row, max_row + 1):
                for j in range(min_col, max_col + 1):
                    tmp_name = "{}_tr{}-tc{}.png".format(prefix, str(i), str(j))
                    if tmp_name not in existing:
                        tmp_img = np.ones(shape, np.uint8) * empty_img_value
                        cv2.imwrite(os.path.join(cur_dir, tmp_name), tmp_img)
                        f.write(tmp_name + "\n")

def get_all_block_img_name_row_col_range(mdir):
    """Collect the tile image paths in *mdir* and their row/col index range.

    *mdir* must contain only files: either a single big image or a set of
    block images, all following the ``*_tr<row>-tc<col>.png`` convention
    (first field: section, second: row index, third: column index).

    Returns:
        (all_img_names, row_col_dict): the sorted list of absolute tile
        paths and a dict with keys min_row/max_row/min_col/max_col.

    Raises:
        Exception: if any entry in *mdir* is not a regular file.
    """
    mdir = file_utils.get_abs_dir(mdir)

    all_img_names = []
    int32_max = np.iinfo(np.int32).max
    min_row = min_col = int32_max
    max_row = max_col = 0
    for file_name in os.listdir(mdir):  # e.g. s0001_tr1-tc1.png -> section 1, row 1, col 1
        abs_file_name = os.path.join(mdir, file_name)
        if not os.path.isfile(abs_file_name):
            raise Exception("Error! {} is not a file!".format(abs_file_name))

        # Skip anything that does not carry row/col markers.
        if "tr" not in file_name or "tc" not in file_name:
            continue

        cur_row, cur_col = get_row_col_idx(abs_file_name)
        min_row, min_col = min(min_row, cur_row), min(min_col, cur_col)
        max_row, max_col = max(max_row, cur_row), max(max_col, cur_col)
        all_img_names.append(abs_file_name)

    row_col_dict = {"min_row": min_row, "max_row": max_row, "min_col": min_col, "max_col": max_col}
    return sorted(all_img_names), row_col_dict

def check_block_imgs_integral(all_img_names, row_col_dict):
    """Verify that the tile list covers the full row/col grid.

    Raises:
        Exception: if the number of tiles differs from
            (rows in range) * (cols in range).
    """
    n_rows = row_col_dict["max_row"] - row_col_dict["min_row"] + 1
    n_cols = row_col_dict["max_col"] - row_col_dict["min_col"] + 1
    expected = n_rows * n_cols
    found = len(all_img_names)
    if expected != found:
        raise Exception("Error!! Theoretical block images num is {}, but actual block images num is {}.".format(expected, found))

def merge_block_imgs(all_img_names, row_col_dict, scale, merge_img_path):
    """Stitch the block tiles into one downscaled image and save it.

    Tile paths are reconstructed from the first entry's prefix plus the
    row/col range in *row_col_dict*; each tile is resized by *scale* and
    the tiles are laid out row by row into a single canvas that is written
    to *merge_img_path*.

    Raises:
        Exception: if any expected tile file is missing.
    """
    file_utils.create_save_dir_from_file(merge_img_path)

    prefix = all_img_names[0].split("_tr")[0]
    ext = all_img_names[0].rsplit(".", 1)[1]
    r0, r1 = row_col_dict["min_row"], row_col_dict["max_row"]
    c0, c1 = row_col_dict["min_col"], row_col_dict["max_col"]

    def tile_path(row, col):
        # Tile file name convention: <prefix>_tr<row>-tc<col>.<ext>
        return "{}_tr{}-tc{}.{}".format(prefix, str(row), str(col), ext)

    # Shape of the top-left tile (representative of all interior tiles) ...
    first_h, first_w = cv2.imread(tile_path(r0, c0), 0).shape
    # ... and of the bottom-right tile (the last row/col may be smaller).
    last_h, last_w = cv2.imread(tile_path(r1, c1), 0).shape

    # Build the canvas size from the rounded per-tile scaled sizes so that
    # fractional scaling cannot make the pasted tiles overflow the canvas.
    merge_h = (r1 - r0) * round(first_h * scale) + round(last_h * scale)
    merge_w = (c1 - c0) * round(first_w * scale) + round(last_w * scale)
    canvas = np.zeros((merge_h, merge_w), np.uint8)

    # Read the tiles in row-major order and paste them at the running offset.
    pos_y = 0
    for row in range(r0, r1 + 1):
        pos_x = 0
        for col in range(c0, c1 + 1):
            cur_path = tile_path(row, col)
            if not os.path.exists(cur_path):
                raise Exception("Error! {} is not exist!".format(cur_path))

            tile = cv2.imread(cur_path, 0)
            tile = cv2.resize(tile, (0, 0), fx=scale, fy=scale)
            canvas[pos_y:pos_y + tile.shape[0], pos_x:pos_x + tile.shape[1]] = tile
            pos_x += tile.shape[1]
        pos_y += tile.shape[0]
    cv2.imwrite(merge_img_path, canvas)

def loop_merge_block_imgs(block_imgs_dir, merge_imgs_dir, scale=0.05, process_num=1):
    """Merge every section sub-directory of tiles into one downscaled image.

    Each sub-directory of *block_imgs_dir* (e.g. 0_s0001) is checked for
    grid completeness and merged by merge_block_imgs in a process pool of
    *process_num* workers; the merged images land in *merge_imgs_dir*.
    """
    block_imgs_dir = file_utils.get_abs_dir(block_imgs_dir)
    merge_imgs_dir = file_utils.create_dir(merge_imgs_dir)

    pool = Pool(processes=process_num)
    async_results = []
    for sub_dir in os.listdir(block_imgs_dir):  # sub_dir like: 0_s0001
        abs_sub_dir = os.path.join(block_imgs_dir, sub_dir)
        if not os.path.isdir(abs_sub_dir):
            continue

        all_img_names, row_col_dict = get_all_block_img_name_row_col_range(abs_sub_dir)
        if not all_img_names:
            continue

        check_block_imgs_integral(all_img_names, row_col_dict)
        first_name = os.path.basename(all_img_names[0])
        # Output name: <section prefix>.<original extension>, e.g. 0_s0001.png
        merge_img_name = first_name.split("_tr")[0] + "." + first_name.rsplit(".", 1)[1]
        merge_img_name = os.path.join(merge_imgs_dir, merge_img_name)
        async_results.append(pool.apply_async(merge_block_imgs, (all_img_names, row_col_dict, scale, merge_img_name)))

    pool.close()
    pool.join()

    # .get() re-raises any exception that occurred in a worker process.
    for r in async_results:
        r.get()
    print("Done!")

def add_row_col_idx(src_dir, add_row_idx=1, add_col_idx=1):
    """Shift the tr/tc indices in every tile file name by the given offsets.

    Each sub-directory of *src_dir* is processed independently; a tile named
    ``<prefix>_tr<i>-tc<j>.png`` is renamed to
    ``<prefix>_tr<i+add_row_idx>-tc<j+add_col_idx>.png``.

    NOTE(review): iterating from the highest indices downwards keeps a
    renamed file from clobbering a not-yet-renamed one only for non-negative
    offsets; negative offsets would need the opposite order -- confirm usage.
    """
    src_dir = file_utils.get_abs_dir(src_dir)
    for sub_dir in os.listdir(src_dir):
        abs_sub_dir = os.path.join(src_dir, sub_dir)
        if not os.path.isdir(abs_sub_dir):
            continue

        # endswith() avoids an IndexError on file names without an extension
        # (the old rsplit(".", 1)[1] crashed on such files).
        img_paths = [os.path.join(abs_sub_dir, f) for f in os.listdir(abs_sub_dir) if f.lower().endswith(".png")]
        assert len(img_paths) > 0, "Error! No png file in {}".format(abs_sub_dir)
        prefix = os.path.basename(img_paths[0]).split("_tr")[0]

        # Extract the row/col range from the file names with a regex.
        pattern = re.compile(r"tr(\d+)-tc(\d+)")
        row_col_dict = file_utils.get_row_col_info(img_paths, pattern)
        min_row_idx, max_row_idx = row_col_dict["min_row"], row_col_dict["max_row"]
        min_col_idx, max_col_idx = row_col_dict["min_col"], row_col_dict["max_col"]

        for i in range(max_row_idx, min_row_idx - 1, -1):
            for j in range(max_col_idx, min_col_idx - 1, -1):
                img_name = "{}_tr{}-tc{}.png".format(prefix, str(i), str(j))
                new_img_name = "{}_tr{}-tc{}.png".format(prefix, str(i + add_row_idx), str(j + add_col_idx))
                if not os.path.exists(os.path.join(abs_sub_dir, img_name)):
                    print("{} is not exist!".format(img_name))
                else:
                    shutil.move(os.path.join(abs_sub_dir, img_name), os.path.join(abs_sub_dir, new_img_name))

def rotate_and_save_tiles(src_dir, dst_dir, angle, keep_true_size=True, interpolation=cv2.INTER_CUBIC, border_value=255, thread_num=32):
    """Rotate each section's tile grid by *angle* and re-tile the result.

    For every section sub-directory of *src_dir*, the tiles are stitched into
    one full-section image, rotated via img_utils.rotate_img, split back into
    tiles of the original tile size and written to ``dst_dir/<section>``.
    Edge tiles that fall short of the tile size are padded with *border_value*.

    Args:
        src_dir: directory holding one sub-directory of *_tr*-tc*.png tiles per section.
        dst_dir: output root; one sub-directory per section is created.
        angle: rotation angle forwarded to img_utils.rotate_img.
        keep_true_size: forwarded to img_utils.rotate_img.
        interpolation: forwarded to img_utils.rotate_img.
        border_value: fill value for rotation borders and edge-tile padding.
        thread_num: number of threads used to write the output tiles.
    """
    src_dir = file_utils.get_abs_dir(src_dir)
    sec_dir_list = [f for f in os.listdir(src_dir) if os.path.isdir(os.path.join(src_dir, f))]
    for sec_dir in sec_dir_list:

        abs_sec_dir = os.path.join(src_dir, sec_dir)
        # endswith() avoids an IndexError on file names without an extension
        # (the old rsplit(".", 1)[1] crashed on such files).
        img_path_list = [os.path.join(abs_sec_dir, f) for f in os.listdir(abs_sec_dir) if f.lower().endswith(".png")]
        assert len(img_path_list) > 0, "Error! No png file in {}".format(sec_dir)
        prefix = os.path.basename(img_path_list[0]).split("_tr")[0]
        img_shape = cv2.imread(img_path_list[0], 0).shape

        # Extract the row/col range from the file names with a regex.
        pattern = re.compile(r"tr(\d+)-tc(\d+)")
        row_col_dict = file_utils.get_row_col_info(img_path_list, pattern)
        min_row_idx, max_row_idx = row_col_dict["min_row"], row_col_dict["max_row"]
        min_col_idx, max_col_idx = row_col_dict["min_col"], row_col_dict["max_col"]
        new_whole_img = np.zeros(((max_row_idx - min_row_idx + 1) * img_shape[0], (max_col_idx - min_col_idx + 1) * img_shape[1]), np.uint8)

        # Read the tiles row by row and paste them into the full-section image.
        for i in range(min_row_idx, max_row_idx + 1):
            for j in range(min_col_idx, max_col_idx + 1):
                img_name = "{}_tr{}-tc{}.png".format(prefix, i, j)
                if not os.path.exists(os.path.join(abs_sec_dir, img_name)):
                    print("{} is not exist!".format(img_name))
                else:
                    img = cv2.imread(os.path.join(abs_sec_dir, img_name), 0)
                    new_whole_img[(i - min_row_idx) * img_shape[0]:(i - min_row_idx + 1) * img_shape[0], (j - min_col_idx) * img_shape[1]:(j - min_col_idx + 1) * img_shape[1]] = img

        # Rotate the stitched full-section image.
        new_whole_img = img_utils.rotate_img(new_whole_img, angle, keep_true_size, interpolation, border_value)
        # Row/col count of the new grid (the rotated canvas may have grown).
        new_rows = int(np.ceil(new_whole_img.shape[0] / img_shape[0]))
        new_cols = int(np.ceil(new_whole_img.shape[1] / img_shape[1]))

        dst_sec_dir = os.path.join(dst_dir, sec_dir)
        os.makedirs(dst_sec_dir, exist_ok=True)

        def save_tile(row, col):
            # Crop one tile out of the rotated image and save it to dst_sec_dir.
            y0 = row * img_shape[0]
            y1 = min((row + 1) * img_shape[0], new_whole_img.shape[0])
            x0 = col * img_shape[1]
            x1 = min((col + 1) * img_shape[1], new_whole_img.shape[1])

            tile = new_whole_img[y0:y1, x0:x1]

            # Pad edge tiles up to the full tile size with the border value.
            if tile.shape != (img_shape[0], img_shape[1]):
                pad_tile = np.ones((img_shape[0], img_shape[1]), dtype=np.uint8) * border_value
                pad_tile[:tile.shape[0], :tile.shape[1]] = tile
                tile = pad_tile

            # The new grid size may differ from the original, but the minimum
            # indices are kept identical to the source tiles.
            out_name = "{}_tr{}-tc{}.png".format(prefix, row + min_row_idx, col + min_col_idx)
            out_path = os.path.join(dst_sec_dir, out_name)
            cv2.imwrite(out_path, tile)

        with ThreadPoolExecutor(max_workers=thread_num) as executor:
            tasks = []
            for row in range(new_rows):
                for col in range(new_cols):
                    tasks.append(executor.submit(save_tile, row, col))

            # result() re-raises any exception from the worker threads.
            for task in tasks:
                task.result()

        print(f"{sec_dir} Done!")

            

def main():
    """Command-line entry point of this auxiliary tool."""
    parser = argparse.ArgumentParser(description="This is a auxiliary tool.")
    parser.add_argument("-i", "--in_dir", type=str, default="/media/hqjin/Elements/em_data", help="source dir")
    parser.add_argument("-o", "--out_dir", type=str, default="/media/hqjin/Elements/em_data", help="output dir")
    parser.add_argument("-p", "--process_num", type=int, default=16, help="the number of processes to use(default: 16)")
    parser.parse_args()

    # Other maintenance steps, kept here for reference:
    # gen_empty_img_in_dirs("/LSEM/user/jinhaiqun/output/mec/20230321_MEC/wafer19/work_dir/aligned_stack/mip1", empty_img_value=255)
    # add_row_col_idx("/LSEM/user/jinhaiqun/output/mec/20230321_MEC/wafer19/work_dir/aligned_stack/mip1", add_row_idx=1, add_col_idx=1)
    # loop_merge_block_imgs("/LSEM/user/jinhaiqun/output/mec/20230321_MEC/wafer19/work_dir/aligned_stack/mip1",
    # "/LSEM/user/jinhaiqun/output/mec/20230321_MEC/wafer19/work_dir/aligned_stack/mip1-1", scale=0.0625, process_num=10)

    src_root = "/LSEM/user/jinhaiqun/output/mec/20230321_MEC/wafer19/work_dir/aligned_stack/mip1"
    dst_root = "/LSEM/user/jinhaiqun/output/mec/20230321_MEC/wafer19/work_dir/aligned_stack/mip1-2"
    rotate_and_save_tiles(src_root, dst_root, 12, keep_true_size=True, interpolation=cv2.INTER_CUBIC, border_value=255, thread_num=32)

if __name__ == "__main__":  # allow importing this module without side effects
    main()
