import cv2
import numpy as np
import imageio
import matplotlib.pyplot as plt
import time
from skimage import measure
from skimage import transform
from PIL import Image
import os







# imgfile1 = 'C:/Users/ZHQ/Desktop/图像配准任务/geshi/@7.091556969883849@477.6362882005051@335.8500511161983@-89.9@-59.9@0.0@113.532608456@34.816178958@335.868@.png'
# imgfile2 = 'C:/Users/ZHQ/Desktop/图像配准任务/gen_masks_data/images/@7.091556969883849@477.6362882005051@335.8500511161983@-89.9@-59.9@0.0@113.532608456@34.816178958@335.868@.jpg'
# #"C:\Users\ZHQ\Desktop\图像配准任务\gen_masks_data\simmasks\@7.091556969883849@477.6362882005051@335.8500511161983@-89.9@-59.9@0.0@113.532608456@34.816178958@335.868@.png"
# img_maskfile1='C:/Users/ZHQ/Desktop/图像配准任务/geshi1/@7.091556969883849@477.6362882005051@335.8500511161983@-89.9@-59.9@0.0@113.532608456@34.816178958@335.868@.png'

# Dataset root and the directory layout derived from it.
rootpath = "H:/data/zzdxOSDB/sim_real/out_20240716/"
image1_path = rootpath + "simimages/"  # source path: simulated images
image2_path = rootpath + "images/"  # source path: real images
mask1_path = rootpath + "simmasks/"  # source path: masks of the simulated images
mask2_path = rootpath + 'simimages_out/'  # save path: warped simulated images
mask3_path = rootpath + 'simmasks_out/'  # save path: warped masks



# Count the images in a folder; input is the folder path.
def count_images(path):
    """Return the number of image files directly inside *path*.

    Only files whose names end in ``.jpg`` or ``.png`` are counted (the
    formats this pipeline produces); everything else is ignored. The scan
    is non-recursive.
    """
    # str.endswith accepts a tuple of suffixes, so one test covers both
    # formats; sum over a generator replaces the manual counter loop.
    return sum(1 for name in os.listdir(path) if name.endswith((".jpg", ".png")))

# Compute the warped image/mask for a single image pair.
def Big_mask2Little_mask(imgfile1,imgfile2,img_maskfile1,img_maskfile2,img_maskfile3):
    """Register one simulated image onto one real image and warp its mask.

    All inputs are paths for a single image:
      imgfile1      -- simulated image (source of the warp)
      imgfile2      -- real image (target coordinate frame)
      img_maskfile1 -- mask of the simulated image
      img_maskfile2 -- output directory for the warped simulated image
      img_maskfile3 -- output directory for the warped mask

    Returns None. Writes nothing and returns early when too few matches
    survive the ratio test or when no homography can be estimated.
    """
    _RESIDUAL_THRESHOLD = 30

    # Read the images.
    image1 = imageio.imread(imgfile1)
    image2 = imageio.imread(imgfile2)
    image1_mask = imageio.imread(img_maskfile1)

    # SIFT keypoints/descriptors; each image is matched with its own keypoints.
    getter = cv2.SIFT_create()
    kps_left, des_left = getter.detectAndCompute(image1, None)
    kps_right, des_right = getter.detectAndCompute(image2, None)

    # FLANN descriptor matching followed by Lowe's ratio test.
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=40)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_left, des_right, k=2)

    locations_1_to_use = []
    locations_2_to_use = []
    for pair in matches:
        # knnMatch may return fewer than 2 neighbours for some descriptors;
        # unpacking such a pair would raise, so skip it.
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.9 * n.distance:  # deliberately permissive ratio
            p1 = kps_left[m.queryIdx]
            p2 = kps_right[m.trainIdx]
            locations_1_to_use.append([p1.pt[0], p1.pt[1]])
            locations_2_to_use.append([p2.pt[0], p2.pt[1]])

    # Too few correspondences to estimate a reliable transform.
    if len(locations_1_to_use) < 10:
        return
    locations_1_to_use = np.array(locations_1_to_use)
    locations_2_to_use = np.array(locations_2_to_use)

    # RANSAC on an affine model to reject outlier matches.
    _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
                              transform.AffineTransform,
                              min_samples=3,
                              residual_threshold=_RESIDUAL_THRESHOLD,
                              max_trials=1000)
    if inliers is None:
        # ransac found no consensus set.
        return

    inlier_idxs = np.nonzero(inliers)[0]
    # Keep only the inlier correspondences, in the (N, 1, 2) layout that
    # cv2.findHomography expects.
    ptsA = np.float32([[locations_1_to_use[idx]] for idx in inlier_idxs])
    ptsB = np.float32([[locations_2_to_use[idx]] for idx in inlier_idxs])

    # Homography mapping the big (real) image onto the small (sim) image.
    # warpPerspective below is called with WARP_INVERSE_MAP, so the net
    # effect is to map image1 / its mask into image2's coordinate frame.
    # NOTE(review): ransacReprojThreshold was previously declared but never
    # passed to findHomography (which then used its default of 3.0); it is
    # now wired through as clearly intended.
    ransacReprojThreshold = 4
    H, status = cv2.findHomography(ptsB, ptsA, cv2.RANSAC, ransacReprojThreshold)
    if H is None:
        # findHomography can fail to find a model; nothing to warp then.
        return

    # Warp image and mask; nearest-neighbour for the mask to keep labels crisp.
    imgOut = cv2.warpPerspective(image1, H, (image2.shape[1], image2.shape[0]),
                                 flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    imgOutMask = cv2.warpPerspective(image1_mask, H, (image2.shape[1], image2.shape[0]),
                                     flags=cv2.INTER_NEAREST + cv2.WARP_INVERSE_MAP)

    # Drop any alpha channel before saving; guard so grayscale input
    # (2-D arrays) no longer crashes on the channel slice.
    if imgOut.ndim == 3:
        imgOut = imgOut[:, :, :3]
    if imgOutMask.ndim == 3:
        imgOutMask = imgOutMask[:, :, :3]

    # Save both outputs under the mask's original file name.
    filename = os.path.basename(img_maskfile1)
    Image.fromarray(imgOut).save(img_maskfile2 + filename)
    Image.fromarray(imgOutMask).save(img_maskfile3 + filename)

from tqdm import tqdm 
def main(df_image1_path,df_image2_path,df_mask1_path,df_mask2_path,df_mask3_path):
    """Batch-process every image pair found in the input directories.

    Parameters:
      df_image1_path -- directory of simulated images
      df_image2_path -- directory of real images
      df_mask1_path  -- directory of simulated-image masks
      df_mask2_path  -- output directory for warped images
      df_mask3_path  -- output directory for warped masks
    """
    # BUG FIX: count images in the *parameter* directory, not the
    # module-level global `image1_path`.
    count = count_images(df_image1_path)
    # os.listdir order is arbitrary; sort each listing so the i-th entries
    # of the three directories actually refer to the same scene.
    all_image1s = sorted(os.listdir(df_image1_path))
    all_image2s = sorted(os.listdir(df_image2_path))
    all_mask1s = sorted(os.listdir(df_mask1_path))

    for i in tqdm(range(count)):
        image1 = os.path.join(df_image1_path, all_image1s[i])
        image2 = os.path.join(df_image2_path, all_image2s[i])
        mask1 = os.path.join(df_mask1_path, all_mask1s[i])

        # Skip pairs whose warped mask already exists (resumable runs).
        filename = os.path.basename(mask1)
        if os.path.exists(df_mask3_path + filename):
            continue

        Big_mask2Little_mask(image1, image2, mask1, df_mask2_path, df_mask3_path)

main(image1_path,image2_path,mask1_path,mask2_path,mask3_path)

