import cv2
import numpy as np
import keras
from keras.layers import Input, Dense
from keras.models import Model
import matplotlib.pyplot as plt

# --- Load images and detect features -----------------------------------------
# FAST provides fast corner detection; SIFT then computes 128-dim descriptors
# at those FAST keypoints (sift.compute does not re-detect).
source_image = cv2.imread('E:/test/99.jpg')
target_image = cv2.imread('E:/test/9.jpg')

# cv2.imread returns None (it does not raise) when the path is missing or the
# file is not a readable image -- fail loudly here instead of crashing later
# inside the detector with an opaque OpenCV error.
if source_image is None:
    raise FileNotFoundError('Could not read source image: E:/test/99.jpg')
if target_image is None:
    raise FileNotFoundError('Could not read target image: E:/test/9.jpg')

# Create FAST object
fast = cv2.FastFeatureDetector_create()

# Detect keypoints using FAST
kp_source = fast.detect(source_image, None)
kp_target = fast.detect(target_image, None)

# Create SIFT object
sift = cv2.SIFT_create()

# Compute descriptors (and possibly refined keypoints) at the FAST locations.
kp_source, desc_source = sift.compute(source_image, kp_source)
kp_target, desc_target = sift.compute(target_image, kp_target)

# --- Equalize descriptor counts ----------------------------------------------
# Zero-pad the smaller descriptor matrix so both have max_descs rows.
# NOTE(review): the zero rows are phantom descriptors with no corresponding
# keypoint; any downstream keypoint-index lookup must guard against them.
# (The original also had an unused `desc_dim` local and a redundant
# `[:max_descs, :]` slice -- padding already yields exactly max_descs rows,
# since both pad amounts are >= 0.)
max_descs = max(desc_target.shape[0], desc_source.shape[0])
desc_source = np.pad(desc_source, ((0, max_descs - desc_source.shape[0]), (0, 0)),
                     'constant', constant_values=0)
desc_target = np.pad(desc_target, ((0, max_descs - desc_target.shape[0]), (0, 0)),
                     'constant', constant_values=0)

# Scale SIFT descriptors (0..255 byte range) into 0..1 to suit the sigmoid
# output / binary_crossentropy loss of the autoencoder below.
# NOTE: this division promotes the arrays to float64.
desc_source = desc_source / 255
desc_target = desc_target / 255
# --- Sparse autoencoder over descriptor vectors ------------------------------
# Architecture: feature_dim -> 256 (relu) -> 256 (relu, L1 activity penalty)
# -> feature_dim (sigmoid), trained to reconstruct the normalized source
# descriptors.
feature_dim = desc_source.shape[1]

inputs = Input(shape=(feature_dim,))
hidden = Dense(256, activation='relu')(inputs)
bottleneck = Dense(
    256,
    activation='relu',
    activity_regularizer=keras.regularizers.l1(10e-5),
)(hidden)
outputs = Dense(feature_dim, activation='sigmoid')(bottleneck)

autoencoder = Model(inputs, outputs)

# Reconstruction objective over the 0..1-scaled descriptors.
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(desc_source, desc_source, epochs=30, batch_size=128, shuffle=True)

# --- Encode target features using the trained autoencoder --------------------
encoded_target = autoencoder.predict(desc_target)

# Similarity between every source descriptor and every encoded target
# descriptor: entry (i, j) = <desc_source[i], encoded_target[j]>.
# BUG FIX: the original computed desc_source.T.dot(encoded_target), which is a
# (feature_dim x feature_dim) matrix -- its argmax indexes feature dimensions,
# not keypoints. The keypoint-by-keypoint similarity needs the transpose on
# the right-hand operand.
similarity_matrix = desc_source.dot(encoded_target.T)

# Indices of the single most similar (source keypoint, target keypoint) pair.
source_idx, target_idx = np.unravel_index(np.argmax(similarity_matrix),
                                          similarity_matrix.shape)

# --- Keypoint coordinates and sanity checks ----------------------------------
# NOTE(review): these padded arrays are superseded below, where the point sets
# are rebuilt from the ratio-tested matches; they are retained here only for
# the shape sanity checks. float32 now matches the stated intent (the original
# said float32 in the comment but built float64 arrays).
pts_source = np.float32([kp.pt for kp in kp_source])
pts_target = np.float32([kp.pt for kp in kp_target])

# Zero-pad the shorter point set so both have the same number of rows.
max_pts = max(pts_source.shape[0], pts_target.shape[0])
pts_source = np.pad(pts_source, ((0, max_pts - pts_source.shape[0]), (0, 0)),
                    'constant', constant_values=0)
pts_target = np.pad(pts_target, ((0, max_pts - pts_target.shape[0]), (0, 0)),
                    'constant', constant_values=0)

# Explicit raises instead of `assert`: asserts are stripped under `python -O`
# and must not be used for runtime validation.
if pts_source.ndim != 2 or pts_source.shape[1] != 2:
    raise ValueError('Invalid source points')
if pts_target.ndim != 2 or pts_target.shape[1] != 2:
    raise ValueError('Invalid target points')
if pts_source.shape != pts_target.shape:
    raise ValueError('Source and target shape mismatch')

# Homography estimation requires at least 4 correspondences per image.
if len(kp_source) < 4 or len(kp_target) < 4:
    raise ValueError('Not enough keypoints')

# --- Brute-force matching with Lowe's ratio test -----------------------------
bf = cv2.BFMatcher()

# BUG FIX: BFMatcher with the default L2 norm requires CV_32F descriptors;
# after the /255 normalization the arrays are float64, so knnMatch would
# raise. Cast explicitly (np.ascontiguousarray keeps OpenCV happy with the
# memory layout).
matches = bf.knnMatch(np.ascontiguousarray(desc_source, dtype=np.float32),
                      np.ascontiguousarray(encoded_target, dtype=np.float32),
                      k=2)

# Lowe's ratio test, plus two guards the original lacked:
#  * knnMatch can return fewer than 2 neighbours per query -- unpacking
#    `m, n = pair` would raise on such entries;
#  * matches that land on zero-padded phantom rows carry indices with no
#    corresponding keypoint and would IndexError when dereferenced below.
good_matches = []
for pair in matches:
    if len(pair) < 2:
        continue
    m, n = pair
    if m.queryIdx >= len(kp_source) or m.trainIdx >= len(kp_target):
        continue
    if m.distance < 0.75 * n.distance:
        good_matches.append(m)

# Homography estimation needs at least 4 good correspondences.
if len(good_matches) < 4:
    raise ValueError('Not enough good matches')

# --- Homography estimation, warping, and display -----------------------------
# Build matched point arrays in the (N, 1, 2) float32 layout cv2 expects.
pts_source = np.float32([kp_source[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
pts_target = np.float32([kp_target[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

# Estimate the homography with RANSAC (3.0 px reprojection threshold).
# `mask` flags the RANSAC inliers; it is unused here but kept for inspection.
M, mask = cv2.findHomography(pts_source, pts_target, cv2.RANSAC, 3.0)

# BUG FIX: findHomography returns None when no model can be fitted; passing
# None to warpPerspective raises a cryptic OpenCV error.
if M is None:
    raise ValueError('Homography estimation failed')

# Warp the target image into the source image's frame (width, height order).
warped_image = cv2.warpPerspective(
    target_image, M,
    (source_image.shape[1], source_image.shape[0]),
    flags=cv2.INTER_LINEAR,
)

# Show the final result (reverse the channel axis: BGR -> RGB for matplotlib).
plt.imshow(warped_image[:, :, ::-1])
plt.title('Warped Image'), plt.xticks([]), plt.yticks([])
plt.show()