import os

import cv2
import numpy as np
from scipy.signal import convolve2d

class RetinalFeatureMatcher:
    """Match vessel-rich regions between a base and a target retinal vessel map.

    High-density vessel regions are selected in the target segmentation map via
    a box-filter convolution; each region is then located inside the (masked)
    base image with OpenCV template matching.
    """

    def __init__(self,
                 base_image,
                 base_optic_coor,
                 kernel_size=64, 
                 stride=16,
                 threshold=60,
                 min_regions=1,
                 max_region=7,
                 match_threshold=0.6,
                 base_mask_threshold=0.4,
                 diffusion_val=2):
        '''
        base_image: base vessel-segmentation image (already resized), 2-D array
        base_optic_coor: (x, y) optic-disc coordinate in the base image
        kernel_size: convolution kernel size used to detect vessel-dense regions
        stride: convolution stride (downsampling factor of the density map)
        threshold: vessel-density threshold for region selection
        min_regions: minimum number of regions kept even when below threshold
        max_region: maximum number of regions to select
        match_threshold: minimum template-matching score to accept a match
        base_mask_threshold: binarization threshold (fraction of 255)
        diffusion_val: dilation radius applied to the target mask
        '''
        self.base_image = base_image
        self.kernel_size = kernel_size
        self.stride = stride
        self.threshold = threshold
        self.min_regions = min_regions
        self.match_threshold = match_threshold
        self.max_region = max_region
        self.image_height = self.base_image.shape[0]
        self.image_width = self.base_image.shape[1]
        self.base_mask_threshold = base_mask_threshold
        self.diffusion_val = diffusion_val
        self.base_optic_coor = base_optic_coor

    def set_base_image(self, base_image, base_optic_coor):
        ''' Replace the base image and its optic-disc coordinate. '''
        self.base_image = base_image
        self.image_height = self.base_image.shape[0]
        self.image_width = self.base_image.shape[1]
        self.base_optic_coor = base_optic_coor

    def generate_base_mask(self, coor_target, image_resolu, padding=20):
        '''
        Build a rectangular uint8 mask (1 inside, 0 outside) over the base
        image around the region matching the target optic-disc coordinate.

        coor_target: (x, y) optic-disc coordinate in the target image
        image_resolu: side length of the (square) target image
        padding: extra margin added around the window
        '''
        coor_base = self.base_optic_coor
        # Offset between the two optic-disc detections.
        relate_coords = (coor_base[0] - coor_target[0], coor_base[1] - coor_target[1])
        # NOTE(review): this evaluates to 2*base_optic - target_optic; confirm
        # the intended mapping is not target_optic + relate_coords.
        coor_in_base = (coor_base[0] + relate_coords[0], coor_base[1] + relate_coords[1])

        half = image_resolu // 2
        # Clamp window starts to 0: a negative start would be interpreted by
        # numpy as a wrap-around index and silently yield an empty mask.
        y0 = max(coor_in_base[1] - half - padding, 0)
        x0 = max(coor_in_base[0] - half - padding, 0)
        y1 = coor_in_base[1] + half + padding  # slice stops clamp automatically
        x1 = coor_in_base[0] + half + padding

        mask_base = np.zeros((self.image_height, self.image_width), dtype=np.uint8)
        mask_base[y0:y1, x0:x1] = 1
        return mask_base

    def generate_tar_mask(self, image):
        '''
        Binarize the target region and dilate it; returns a CV_8U mask
        (0 or 255) suitable for cv2.matchTemplate.
        '''
        # Threshold against base_mask_threshold * 255 to get a binary mask.
        mask = np.where(image > self.base_mask_threshold * 255, 1, 0)

        # Dilate with a (2*diffusion_val + 1)-square kernel via convolution.
        size = self.diffusion_val * 2 + 1
        kernel = np.ones((size, size), dtype=np.float32)
        mask = convolve2d(mask.astype(np.float32), kernel, mode='same', boundary='fill', fillvalue=0)

        # Re-binarize: any touched pixel becomes 1.
        mask = np.where(mask > 0, 1, 0)

        # Scale to 8-bit (CV_8U) as OpenCV expects.
        return (mask * 255).astype(np.uint8)

    def get_coor(self, i, j):
        ''' Convert density-map coordinates (i, j) to an image-space region
        (xmin, xmax, ymin, ymax), clipped to the image bounds. '''
        center_y = i * self.stride
        center_x = j * self.stride

        half_kernel = self.kernel_size // 2

        ymin = max(center_y - half_kernel, 0)
        xmin = max(center_x - half_kernel, 0)
        ymax = min(center_y + half_kernel, self.image_height - 1)
        xmax = min(center_x + half_kernel, self.image_width - 1)

        return (xmin, xmax, ymin, ymax)

    def select_region(self, image):
        '''
        Select vessel-rich regions in the target segmentation map.

        image: target vessel-segmentation image (2-D array)
        Returns a list of (xmin, xmax, ymin, ymax) boxes, greedily picked by
        descending vessel density with 8-neighborhood suppression.
        '''
        # Box-filter convolution approximates local vessel density.
        kernel = np.ones((self.kernel_size, self.kernel_size), dtype=np.float32)
        vessel_density = convolve2d(image.astype(np.float32), kernel, mode='same', boundary='fill', fillvalue=0)

        # Downsample by the stride for speed.
        vessel_density = vessel_density[::self.stride, ::self.stride]
        selected_regions = []
        cnt = 0
        while cnt < self.max_region:
            max_val = vessel_density.max()
            # Stop once density falls below threshold, but only after the
            # minimum number of regions has been collected.
            if max_val < self.threshold and len(selected_regions) >= self.min_regions:
                break

            i, j = np.unravel_index(np.argmax(vessel_density), vessel_density.shape)
            bbox = self.get_coor(i, j)
            selected_regions.append(bbox)

            # Zero out the 8-neighborhood to avoid picking adjacent regions.
            for di in [-1, 0, 1]:
                for dj in [-1, 0, 1]:
                    ni, nj = i + di, j + dj
                    if 0 <= ni < vessel_density.shape[0] and 0 <= nj < vessel_density.shape[1]:
                        vessel_density[ni, nj] = 0
            cnt += 1
        return selected_regions

    def visual_region(self, image, regions, save_dir=None):
        '''
        Save crops of the selected regions of interest to save_dir.

        image: target image
        regions: list of (xmin, xmax, ymin, ymax) boxes
        save_dir: output directory (created if missing); no-op when None
        '''
        if save_dir is not None:
            os.makedirs(save_dir, exist_ok=True)

        for idx, (xmin, xmax, ymin, ymax) in enumerate(regions):
            region = image[ymin:ymax, xmin:xmax]

            if save_dir:
                save_path = os.path.join(save_dir, f'{idx + 1}.jpg')
                cv2.imwrite(save_path, region)
                print(f"感兴趣区域 {idx + 1} 已保存到: {save_path}")

    def match_region(self, base_after_mask, target_image):
        '''
        Template-match a target region inside the masked base image.

        base_after_mask: base image restricted by the base mask
        target_image: region crop (template) from the target vessel map
        Returns (max_loc, max_val) of the normalized correlation result.
        '''
        mask = self.generate_tar_mask(target_image)
        # NOTE(review): OpenCV documents mask support for matchTemplate only
        # for TM_SQDIFF and TM_CCORR_NORMED — confirm TM_CCOEFF_NORMED + mask
        # works on the deployed OpenCV version.
        result = cv2.matchTemplate(base_after_mask, target_image, method=cv2.TM_CCOEFF_NORMED, mask=mask)

        # Best-scoring location in the base image.
        _, max_val, _, max_loc = cv2.minMaxLoc(result)
        return max_loc, max_val

    def match_interest_regions(self, target_image, target_optic_coor):
        '''
        Select regions of interest in the target image and match each one
        against the masked base image.

        target_image: target vessel-segmentation image
        target_optic_coor: (x, y) optic-disc coordinate in the target image
        Returns a list of dicts with target_coords, match_coords, match_value.
        '''
        interest_regions = []
        tar_regions = self.select_region(target_image)
        base_mask = self.generate_base_mask(target_optic_coor, image_resolu=target_image.shape[0])
        # Save crops of the selected regions for inspection.
        self.visual_region(target_image, tar_regions, save_dir='./experiments/visual_match/interest_region')
        base_after_mask = base_mask * self.base_image
        for (xmin, xmax, ymin, ymax) in tar_regions:
            target_region = target_image[ymin:ymax, xmin:xmax]

            match_loc, match_val = self.match_region(base_after_mask, target_region)
            if match_val < self.match_threshold:
                continue
            interest_regions.append({
                "target_coords": (xmin, xmax, ymin, ymax),
                # NOTE(review): match box assumes a full kernel_size extent;
                # edge-clipped regions may actually be smaller.
                "match_coords": (match_loc[0], match_loc[0] + self.kernel_size, match_loc[1], match_loc[1] + self.kernel_size),
                "match_value": match_val
            })
        return interest_regions

    def visual_match(self, target_image, matches, line_connect=True, save_path=None):
        '''
        Visualize matches by concatenating target and base images side by side
        and drawing the matched boxes.

        target_image: target image
        matches: list of match dicts from match_interest_regions
        line_connect: draw a line connecting each matched pair of boxes
        save_path: output file path (image is written only when provided)
        '''
        # Promote grayscale inputs to 3-channel BGR so colored boxes render.
        if len(target_image.shape) == 2:
            target_image = cv2.cvtColor(target_image, cv2.COLOR_GRAY2BGR)
        if len(self.base_image.shape) == 2:
            base_image = cv2.cvtColor(self.base_image, cv2.COLOR_GRAY2BGR)
        else:
            base_image = self.base_image

        # Concatenate target (left) and base (right).
        combined_image = np.concatenate((target_image, base_image), axis=1)
        offset = target_image.shape[1]  # x-offset of the base image half

        for match in matches:
            xmin_t, xmax_t, ymin_t, ymax_t = match['target_coords']
            xmin_b, xmax_b, ymin_b, ymax_b = match['match_coords']

            # Red box on the target half.
            cv2.rectangle(combined_image, (xmin_t, ymin_t), (xmax_t, ymax_t), (0, 0, 255), 2)

            # Blue box on the base half (x shifted by the offset).
            cv2.rectangle(combined_image, (xmin_b + offset, ymin_b), (xmax_b + offset, ymax_b), (255, 0, 0), 2)

            # Green line connecting the box centers.
            if line_connect:
                center_t = ((xmin_t + xmax_t) // 2, (ymin_t + ymax_t) // 2)
                center_b = ((xmin_b + xmax_b) // 2 + offset, (ymin_b + ymax_b) // 2)
                cv2.line(combined_image, center_t, center_b, (0, 255, 0), 1)

        if save_path:
            cv2.imwrite(save_path, combined_image)
            print(f"匹配可视化结果已保存到: {save_path}")

# Example usage
if __name__ == "__main__":
    import os
    import json

    data_dir = './experiments/dataset/vascular'
    base_name = '1243.jpg'
    target_name = '1244.jpg'
    image_resolu = 512

    # Load optic-disc coordinates from the annotation file.
    annotation_path = '../Dataset/infantImages/annotations.json'
    with open(annotation_path, 'r') as f:
        annotations = json.load(f)
    base_optic_coor = annotations[base_name]['optic_disc_pred']['position']
    target_optic_coor = annotations[target_name]['optic_disc_pred']['position']

    # Read both vessel-segmentation images as grayscale.
    base_image = cv2.imread(os.path.join(data_dir, base_name), cv2.IMREAD_GRAYSCALE)
    target_image = cv2.imread(os.path.join(data_dir, target_name), cv2.IMREAD_GRAYSCALE)

    # Resize both to a common square resolution.
    base_image = cv2.resize(base_image, (image_resolu, image_resolu))
    target_image = cv2.resize(target_image, (image_resolu, image_resolu))

    # Zero out faint pixels (below 40% of full intensity).
    base_image[base_image < (0.40 * 255)] = 0
    target_image[target_image < (0.40 * 255)] = 0

    # Build the matcher and run region matching.
    feature_matcher = RetinalFeatureMatcher(base_image, base_optic_coor=base_optic_coor, kernel_size=128)
    matches = feature_matcher.match_interest_regions(target_image, target_optic_coor=target_optic_coor)

    # Visualize and save the match result.
    save_dir = './experiments/visual_match'
    os.makedirs(save_dir, exist_ok=True)
    feature_matcher.visual_match(target_image, matches, save_path=os.path.join(save_dir, 'match_result.jpg'))