import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import re
import time
import argparse
import numpy as np
import cv2
from PIL import Image
from glob import glob
import math
import torch
import torch.nn.functional as F
import inspect  # used to fetch the current function's name for start/end logging
from core.utils import frame_utils, flow_viz
from core.FlowFormer import build_flowformer
from evaluate_FlowFormer_tile import InputPadder
from configs.submissions import get_cfg

TRAIN_SIZE = [432, 960]

def compute_grid_indices(image_shape, patch_size=None, min_overlap=20):
    """Return top-left (h, w) offsets of overlapping patches tiling an image.

    Patches step by ``patch_size - min_overlap`` along each axis, and the
    last patch on each axis is snapped flush with the image boundary so the
    whole image is covered.

    Args:
        image_shape: (height, width) of the image. Each dimension should be
            >= the corresponding patch dimension, otherwise the snapped
            offsets go negative.
        patch_size: (patch_h, patch_w); defaults to TRAIN_SIZE.
        min_overlap: minimum overlap in pixels between adjacent patches.

    Returns:
        List of (h, w) top-left offsets, row-major.

    Raises:
        ValueError: if min_overlap is not smaller than both patch dimensions.
    """
    if patch_size is None:
        patch_size = TRAIN_SIZE
    # Bug fix: the original validated and stepped by the global TRAIN_SIZE
    # even when a different patch_size was passed; use patch_size
    # consistently. Also fixed the missing space in the error message.
    if min_overlap >= patch_size[0] or min_overlap >= patch_size[1]:
        raise ValueError(
            f"Overlap should be less than size of patch (got {min_overlap} "
            f"for patch size {patch_size}).")
    if image_shape[0] == patch_size[0]:
        hs = list(range(0, image_shape[0], patch_size[0]))
    else:
        hs = list(range(0, image_shape[0], patch_size[0] - min_overlap))
    if image_shape[1] == patch_size[1]:
        ws = list(range(0, image_shape[1], patch_size[1]))
    else:
        ws = list(range(0, image_shape[1], patch_size[1] - min_overlap))

    # Make sure the final patch is flush with the image boundary.
    hs[-1] = image_shape[0] - patch_size[0]
    ws[-1] = image_shape[1] - patch_size[1]
    return [(h, w) for h in hs for w in ws]

def compute_weight(hws, image_shape, patch_size=TRAIN_SIZE, sigma=1.0, wtype='gaussian'):
    """Build per-patch Gaussian blending weights for tiled inference.

    A single Gaussian bump peaked at the patch centre is stamped at every
    patch offset in `hws` over a full-image canvas, then the per-patch crops
    of that canvas are returned so overlapping tile predictions can be
    blended smoothly.

    Args:
        hws: list of (h, w) top-left patch offsets (see compute_grid_indices).
        image_shape: (height, width) of the full image.
        patch_size: (patch_h, patch_w) of each tile.
        sigma: Gaussian spread in normalised patch coordinates.
        wtype: unused; kept for interface compatibility.

    Returns:
        List of CUDA tensors, one (1, 1, patch_h, patch_w) weight map per
        entry of `hws`.
    """
    num_patches = len(hws)
    grid_h, grid_w = torch.meshgrid(torch.arange(patch_size[0]), torch.arange(patch_size[1]))
    # Normalised coordinates centred on the patch middle ([-0.5, 0.5) range).
    grid_h = grid_h / float(patch_size[0]) - 0.5
    grid_w = grid_w / float(patch_size[1]) - 0.5
    radius = (grid_h ** 2 + grid_w ** 2) ** 0.5 / sigma
    norm_const = 1 / (sigma * math.sqrt(2 * math.pi))
    weights_hw = norm_const * torch.exp(-0.5 * radius ** 2)

    # Stamp the bump at every patch location on a full-image canvas.
    canvas = torch.zeros(1, num_patches, *image_shape)
    for idx, (top, left) in enumerate(hws):
        canvas[:, idx, top:top + patch_size[0], left:left + patch_size[1]] = weights_hw
    canvas = canvas.cuda()
    # Crop each patch's own weight map back out of the canvas.
    return [canvas[:, idx:idx + 1, top:top + patch_size[0], left:left + patch_size[1]]
            for idx, (top, left) in enumerate(hws)]

def compute_flow(model, image1, image2, weights=None):
    """Run the flow model on an image pair and return the flow as numpy.

    Args:
        model: flow network; called as ``model(img1, img2) -> (flow, extra)``.
        image1, image2: 3xHxW float tensors; a batch dimension is added here.
        weights: optional per-patch blending weights from compute_weight().
            When given, inference is tiled over TRAIN_SIZE patches and the
            overlapping predictions are blended with these weights; when
            None, the full (padded) images are processed in one pass.

    Returns:
        numpy array of shape (H, W, 2) with the predicted flow.
    """
    image_size = image1.shape[1:]
    image1, image2 = image1[None], image2[None]

    if weights is None:  # no tiling: pad to a model-friendly size, run once
        padder = InputPadder(image1.shape)
        image1, image2 = padder.pad(image1, image2)

        flow_pre, _ = model(image1, image2)

        flow_pre = padder.unpad(flow_pre)
        flow = flow_pre[0].permute(1, 2, 0).cpu().detach().numpy()
    else:  # tiled inference with weighted blending of overlaps
        # Grid offsets are only needed on the tiled path (the original
        # computed them needlessly in the no-tile case as well).
        hws = compute_grid_indices(image_size)
        flows = 0
        flow_count = 0

        for idx, (h, w) in enumerate(hws):
            image1_tile = image1[:, :, h:h + TRAIN_SIZE[0], w:w + TRAIN_SIZE[1]]
            image2_tile = image2[:, :, h:h + TRAIN_SIZE[0], w:w + TRAIN_SIZE[1]]
            flow_pre, _ = model(image1_tile, image2_tile)
            # Zero-pad the tile's weighted prediction back to full image size.
            padding = (w, image_size[1] - w - TRAIN_SIZE[1],
                       h, image_size[0] - h - TRAIN_SIZE[0], 0, 0)
            flows += F.pad(flow_pre * weights[idx], padding)
            flow_count += F.pad(weights[idx], padding)

        flow_pre = flows / flow_count
        # Bug fix: detach before .numpy(); the original omitted it here
        # (unlike the no-tile branch), which raises when grads are enabled.
        flow = flow_pre[0].permute(1, 2, 0).cpu().detach().numpy()
    return flow

def compute_adaptive_image_size(image_size):
    """Return the cv2.resize dsize that scales an (H, W) image to TRAIN_SIZE.

    The larger of the two per-axis scale factors is used, so both output
    dimensions reach at least the target (aspect ratio preserved).

    Args:
        image_size: (height, width) of the source image.

    Returns:
        (width, height) tuple — note the axis swap, as cv2 expects dsize
        in (width, height) order.
    """
    scale = max(TRAIN_SIZE[0] / image_size[0], TRAIN_SIZE[1] / image_size[1])
    return (int(image_size[1] * scale), int(image_size[0] * scale))

def prepare_image(root_dir, viz_root_dir, fn1, fn2, keep_size):
    """Load an image pair as float CHW tensors and build the viz output path.

    Args:
        root_dir: directory the filenames are relative to.
        viz_root_dir: root under which the visualization path is mirrored.
        fn1, fn2: relative filenames of the two frames.
        keep_size: when False, both images are resized adaptively toward
            TRAIN_SIZE (see compute_adaptive_image_size).

    Returns:
        (image1, image2, viz_fn): two 3xHxW float tensors and the .png path
        (derived from fn1) where the visualization should be written.
    """
    print(f"preparing image...")
    print(f"root dir = {root_dir}, fn = {fn1}")

    def _load(fn):
        # Read, promote grayscale to 3 channels, drop any alpha channel.
        arr = np.array(frame_utils.read_gen(os.path.join(root_dir, fn)))
        if arr.ndim == 2:
            arr = cv2.cvtColor(arr, cv2.COLOR_GRAY2BGR)
        return np.array(arr).astype(np.uint8)[..., :3]

    image1 = _load(fn1)
    image2 = _load(fn2)
    if not keep_size:
        dsize = compute_adaptive_image_size(image1.shape[0:2])
        image1 = cv2.resize(image1, dsize=dsize, interpolation=cv2.INTER_CUBIC)
        image2 = cv2.resize(image2, dsize=dsize, interpolation=cv2.INTER_CUBIC)
    image1 = torch.from_numpy(image1).permute(2, 0, 1).float()
    image2 = torch.from_numpy(image2).permute(2, 0, 1).float()

    # Mirror fn1's relative directory under viz_root_dir for the output path.
    rel_dir = os.path.dirname(fn1)
    stem = os.path.splitext(os.path.basename(fn1))[0]
    viz_dir = os.path.join(viz_root_dir, rel_dir)
    if not os.path.exists(viz_dir):
        os.makedirs(viz_dir)

    viz_fn = os.path.join(viz_dir, stem + '.png')
    return image1, image2, viz_fn

def build_model():
    """Build FlowFormer from the submission config and prepare it for eval.

    Loads the checkpoint whose path is taken from ``cfg.model``; the state
    dict is loaded through a DataParallel wrapper (checkpoint keys are
    module-prefixed). Returns the model on GPU in eval mode.
    """
    print(f"building  model...")
    cfg = get_cfg()
    model = torch.nn.DataParallel(build_flowformer(cfg))
    model.load_state_dict(torch.load(cfg.model))
    model.cuda()
    model.eval()
    return model

def visualize_flow(root_dir, viz_root_dir, model, img_pairs, keep_size,
                   output_dir='/home/crxc/disk/emReg/wraped_data'):
    """For each image pair: compute flow, save its visualization and the
    flow-warped second image.

    Args:
        root_dir: directory the pair filenames are relative to.
        viz_root_dir: root used to derive per-pair output filenames.
        model: flow model (see compute_flow).
        img_pairs: iterable of (fn1, fn2) filename tuples.
        keep_size: if False, images are resized adaptively (see prepare_image).
        output_dir: directory the results are written to. Generalized from a
            hard-coded path; the default keeps the original behavior.
    """
    weights = None  # full-frame inference, no tiling
    for fn1, fn2 in img_pairs:
        print(f"processing {fn1}, {fn2}...")
        image1, image2, viz_fn = prepare_image(root_dir, viz_root_dir, fn1, fn2, keep_size)
        flow = compute_flow(model, image1, image2, weights)
        flow_img = flow_viz.flow_to_image(flow)
        filename = os.path.basename(viz_fn)
        destination_path = os.path.join(output_dir, filename)
        destination_path2 = os.path.join(output_dir, "dvf.png")
        # Warp the second image with the predicted flow.
        wrap_img = flow_viz.wrap(image2.numpy(), flow)
        # Channel reversal: arrays are RGB, cv2.imwrite expects BGR.
        cv2.imwrite(destination_path2, flow_img[:, :, [2, 1, 0]])
        cv2.imwrite(destination_path, wrap_img[:, :, [2, 1, 0]])

def process_sintel(sintel_dir):
    """Collect consecutive-frame PNG pairs from every scene subdirectory.

    Args:
        sintel_dir: directory whose immediate children are scene folders.

    Returns:
        List of (frame_i, frame_{i+1}) absolute-path tuples, frames sorted
        lexicographically within each scene.
    """
    img_pairs = []
    for scene in os.listdir(sintel_dir):
        frames = sorted(glob(os.path.join(sintel_dir, scene, '*.png')))
        # Pair each frame with its immediate successor.
        img_pairs.extend(zip(frames, frames[1:]))
    return img_pairs

def generate_pairs(dirname, start_idx, end_idx):
    """Build consecutive-frame path pairs for zero-padded 6-digit PNG names.

    Args:
        dirname: directory containing the frames.
        start_idx: index of the first frame.
        end_idx: exclusive upper bound for the first member of a pair (so
            end_idx itself appears only as the second member of the last pair).

    Returns:
        List of (frame_i, frame_{i+1}) path tuples.
    """
    def _frame(idx):
        return os.path.join(dirname, f'{idx:06}.png')

    return [(_frame(i), _frame(i + 1)) for i in range(start_idx, end_idx)]

def format_time(seconds):
    """Format a duration in seconds as HH:MM:SS (hours are not wrapped at 24)."""
    hrs, rem = divmod(seconds, 3600)
    mins, sec = divmod(rem, 60)
    return f"{int(hrs):02d}:{int(mins):02d}:{int(sec):02d}"

def process_image_pairs(fixed_dir, moving_dir, flow_output_dir, warp_output_dir=None):
    """Compute fixed→moving optical flow for every matching tile pair.

    Tiles in the two directories are matched by the ``tr<row>-tc<col>``
    token in their filenames; for each common (row, col) key the predicted
    flow is saved as ``<moving_name>.npy`` under flow_output_dir.

    Args:
        fixed_dir: directory of fixed (reference) tile PNGs.
        moving_dir: directory of moving tile PNGs.
        flow_output_dir: directory for the .npy flow outputs (created).
        warp_output_dir: optional directory for warped outputs (created but
            currently unused — warping code is disabled).

    Raises:
        ValueError: if no (row, col) key is shared between the two
            directories, or an image file cannot be read.
    """
    print("\033[31m<=== {} {}() start. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))
    # Create output directories.
    os.makedirs(flow_output_dir, exist_ok=True)
    if warp_output_dir:
        os.makedirs(warp_output_dir, exist_ok=True)

    # Load the model once for all pairs.
    model = build_model()

    def _index_by_row_col(directory):
        """Map (row, col) -> filename for PNGs named with 'tr<r>-tc<c>'."""
        index = {}
        for name in sorted(os.listdir(directory)):
            if not name.endswith('.png'):
                continue
            # Fix: extract once per filename (original ran the regex twice,
            # once to filter and once to build the key).
            match = re.search(r'tr(\d+)-tc(\d+)', name)
            if match:
                index[(int(match.group(1)), int(match.group(2)))] = name
        return index

    fixed_dict = _index_by_row_col(fixed_dir)
    moving_dict = _index_by_row_col(moving_dir)

    # Keys present in both directories.
    common_keys = set(fixed_dict.keys()).intersection(set(moving_dict.keys()))
    if len(common_keys) == 0:
        raise ValueError("没有找到相同行列编号的图像对")

    # Fix: iterate in sorted order — set iteration order is arbitrary, which
    # made the processing order (and logs) nondeterministic.
    for row_col in sorted(common_keys):
        fixed_image_path = os.path.join(fixed_dir, fixed_dict[row_col])
        moving_image_path = os.path.join(moving_dir, moving_dict[row_col])

        fixed_image = cv2.imread(fixed_image_path)
        moving_image = cv2.imread(moving_image_path)
        # Fix: cv2.imread returns None on failure; fail with a clear message
        # instead of a cryptic error in np.transpose below.
        if fixed_image is None or moving_image is None:
            raise ValueError(
                f"Failed to read image pair: {fixed_image_path}, {moving_image_path}")

        # HWC uint8 -> CHW float tensors expected by compute_flow.
        fixed_image_tensor = torch.from_numpy(np.transpose(fixed_image, (2, 0, 1))).float()
        moving_image_tensor = torch.from_numpy(np.transpose(moving_image, (2, 0, 1))).float()

        # Compute optical flow (no tiling).
        flow = compute_flow(model, fixed_image_tensor, moving_image_tensor, None)

        # Save the raw flow field next to the moving tile's name.
        flow_output_path = os.path.join(flow_output_dir, moving_dict[row_col].replace('.png', '.npy'))
        np.save(flow_output_path, flow)
    print("\033[32m<=== {} {}() end. ===>\033[0m".format(time.strftime("%Y-%m-%d %H:%M:%S"), inspect.currentframe().f_code.co_name))

def main():
    """CLI entry point: derive output dirs from the moving dir and run the pipeline."""
    parser = argparse.ArgumentParser(description="This is a auxiliary tool.")
    parser.add_argument("-f", "--fixed_dir", type=str, default="/CX/neuro_segment/user/jinhaiqun/out/mec/align_imgs/refine/01918_256nm_combine", help="source dir")
    parser.add_argument("-m", "--moving_dir", type=str, default="/CX/neuro_segment/user/jinhaiqun/out/mec/align_imgs/refine/01919_256nm_combine", help="output dir")
    # NOTE(review): --process_num is accepted but never used by the pipeline.
    parser.add_argument("-p", "--process_num", type=int, default=32, help="the number of processes to use(default: 32)")
    args = parser.parse_args()

    # Output directories are siblings of the moving dir, suffixed by purpose.
    base = args.moving_dir.rstrip("/\\")
    flow_output_dir = "{}_flow".format(base)
    warp_output_dir = "{}_refine".format(base)
    process_image_pairs(args.fixed_dir, args.moving_dir, flow_output_dir, warp_output_dir)

if __name__ == '__main__':
    main()

    # Example: stage one (coarse, 256nm tiles)
    # fixed_dir = r'/LSEM/user/jinhaiqun/output/mec/20230321_MEC/all_align_w20-1/refine_test/07253_256nm_combine'
    # moving_dir = r'/LSEM/user/jinhaiqun/output/mec/20230321_MEC/all_align_w20-1/refine_test/07254_256nm_combine'
    # flow_output_dir = r'/LSEM/user/jinhaiqun/output/mec/20230321_MEC/all_align_w20-1/refine_test/07254_256nm_combine_flow'
    # warp_output_dir = r'/LSEM/user/jinhaiqun/output/mec/20230321_MEC/all_align_w20-1/refine_test/07254_256nm_combine_refine'
    # process_image_pairs(fixed_dir, moving_dir, flow_output_dir, warp_output_dir)

    # Example: stage two (fine, 32nm split tiles)
    # fixed_dir = r'/LSEM/user/jinhaiqun/output/mec/20230321_MEC/all_align_w20-1/refine_test/07253_32nm_split2'
    # moving_dir = r'/LSEM/user/jinhaiqun/output/mec/20230321_MEC/all_align_w20-1/refine_test/07254_32nm_coarse_split2'
    # flow_output_dir = r'/LSEM/user/jinhaiqun/output/mec/20230321_MEC/all_align_w20-1/refine_test/07254_32nm_coarse_split2_flow'
    # warp_output_dir = r'/LSEM/user/jinhaiqun/output/mec/20230321_MEC/all_align_w20-1/refine_test/07254_32nm_coarse_split2_refine'
    # process_image_pairs(fixed_dir, moving_dir, flow_output_dir, warp_output_dir)
