from skimage.feature import ORB, match_descriptors
from skimage.io import imread, imsave
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform, SimilarityTransform, warp
from skimage.color import rgb2gray, gray2rgb
from skimage import img_as_ubyte
import numpy as np


# Load images (convert to grayscale only if needed)
image0 = imread('images/goldengate1.png')
image1 = imread('images/goldengate2.png')

# Convert to grayscale if the image is RGB (rgb2gray yields floats in [0, 1])
if image0.ndim == 3:
    image0 = rgb2gray(image0)
if image1.ndim == 3:
    image1 = rgb2gray(image1)

# ORB feature detection and descriptor extraction.
# NOTE: orb.keypoints / orb.descriptors are replaced on every call to
# detect_and_extract, so capture them before reusing the detector.
orb = ORB(n_keypoints=1000, fast_threshold=0.05)
orb.detect_and_extract(image0)
keypoints0, descriptors0 = orb.keypoints, orb.descriptors

orb.detect_and_extract(image1)
keypoints1, descriptors1 = orb.keypoints, orb.descriptors

# Match descriptors between the two images (cross_check keeps only
# mutual best matches, reducing outliers before RANSAC)
matches = match_descriptors(descriptors0, descriptors1, cross_check=True)

# Matched coordinates. Keypoints are (row, col); skimage transforms work in
# (x, y) = (col, row), hence the [:, ::-1] axis flip.
src = keypoints1[matches[:, 1]][:, ::-1]
dst = keypoints0[matches[:, 0]][:, ::-1]

# Robustly estimate the projective transform mapping image1 -> image0
# (min_samples=4: a homography needs four point correspondences)
transform_model, inliers = ransac(
    (src, dst), ProjectiveTransform, min_samples=4, residual_threshold=2
)

# Corners of image1 in (x, y) order.
# NOTE(review): image0 is assumed to have the same extent as image1 when the
# untransformed `corners` are stacked in below — confirm the inputs match.
r, c = image1.shape[:2]
corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])
warped_corners = transform_model(corners)
all_corners = np.vstack((warped_corners, corners))

# Bounding box of both images determines the output canvas.
# FIX: np.ceil returns floats; warp/np allocation requires an integer
# (rows, cols) shape, so cast explicitly.
corner_min = np.min(all_corners, axis=0)
corner_max = np.max(all_corners, axis=0)
output_shape = np.ceil((corner_max - corner_min)[::-1]).astype(int)

# Translate so every warped coordinate lands at a non-negative position
offset = SimilarityTransform(translation=-corner_min)

# Warp both images onto the common canvas; cval=-1 marks "no data" pixels,
# which is distinguishable from legitimate black (0.0) pixels.
image0_warp = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)
image1_warp = warp(image1, (transform_model + offset).inverse,
                   output_shape=output_shape, cval=-1)

# Build RGB + coverage-mask stacks for each warped image
image0_mask = (image0_warp != -1)
image0_warp[~image0_mask] = 0
image0_alpha = np.dstack((gray2rgb(image0_warp), image0_mask))

image1_mask = (image1_warp != -1)
image1_warp[~image1_mask] = 0
image1_alpha = np.dstack((gray2rgb(image1_warp), image1_mask))

# Sum the stacks and normalise by coverage count so the overlap region is
# averaged rather than doubled (max(alpha, 1) avoids division by zero where
# neither image contributes).
merged = image0_alpha + image1_alpha
alpha = merged[..., 3]
merged[..., :3] /= np.maximum(alpha, 1)[..., np.newaxis]

# FIX: interpolation at the seam blends real pixels with the -1 sentinel,
# which can leave slightly out-of-range floats inside the mask; img_as_ubyte
# raises ValueError on values outside [0, 1], so clip first.
merged_uint8 = img_as_ubyte(np.clip(merged[..., :3], 0, 1))
imsave('images/output.jpg', merged_uint8)
