import gradio as gr
import cv2
import numpy as np
from PIL import Image

from SignatureSimilarity import SignatureSimilarity

# Load the YOLO-backed signature matcher once at startup.
signature_similarity = SignatureSimilarity("best.pt")


def preprocess_signature(image):
    """Binarize a signature image: grayscale conversion followed by Otsu thresholding."""
    if not isinstance(image, Image.Image):
        image = Image.fromarray(image)

    # Convert to numpy array
    img_array = np.array(image)

    # Convert to grayscale if needed
    if len(img_array.shape) == 3:
        gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
    else:
        gray = img_array

    # Otsu's thresholding (inverted: ink becomes white on a black background)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    return Image.fromarray(binary)
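
# NOTE: the original docstring and UI text mention deskewing, which
# preprocess_signature above does not actually perform. The helper below is a
# minimal sketch of one widely used recipe (rotating by the angle of a
# cv2.minAreaRect fitted to the ink pixels); it is for reference only and is
# not wired into the pipeline. The angle convention of minAreaRect changed in
# OpenCV 4.5, so verify the correction logic against your OpenCV version.
def deskew_signature(binary):
    """Rotate a binarized (white-on-black) signature so its baseline is roughly horizontal."""
    binary = np.array(binary)
    # Collect the coordinates of all ink pixels; minAreaRect needs float32/int32
    coords = np.column_stack(np.where(binary > 0)).astype(np.float32)
    if len(coords) < 3:
        return Image.fromarray(binary)
    angle = cv2.minAreaRect(coords)[-1]
    # Map the reported angle to the smallest corrective rotation
    if angle < -45:
        angle = -(90 + angle)
    else:
        angle = -angle
    h, w = binary.shape[:2]
    M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
    return Image.fromarray(cv2.warpAffine(binary, M, (w, h), flags=cv2.INTER_NEAREST))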
def draw_feature_matches(img1_cv, img2_cv):
    """Visualize SIFT feature correspondences between two signature crops."""
    # SIFT feature detection
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1_cv, None)
    kp2, des2 = sift.detectAndCompute(img2_cv, None)

    if des1 is None or des2 is None or len(des1) < 2 or len(des2) < 2:
        return None

    # Feature matching with FLANN
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    try:
        matches = flann.knnMatch(des1, des2, k=2)
    except Exception:
        return None

    # Lowe's ratio test; knnMatch may return fewer than k neighbors, so guard the unpack
    good_matches = []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
            good_matches.append(pair[0])

    # Draw matches if enough were found
    if len(good_matches) >= 4:
        matches_img = cv2.drawMatches(
            img1_cv, kp1, img2_cv, kp2, good_matches, None,
            flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS
        )
        return cv2.cvtColor(matches_img, cv2.COLOR_BGR2RGB)
    return None


def process_images(image1, image2, clip_threshold=0.8, match_threshold=0.4):
    """Main processing pipeline: preprocess, detect and score, then visualize."""
    try:
        # Convert to PIL if needed
        if not isinstance(image1, Image.Image):
            image1 = Image.fromarray(image1)
        if not isinstance(image2, Image.Image):
            image2 = Image.fromarray(image2)

        # Preprocess (binarize) both images
        image1 = preprocess_signature(image1)
        image2 = preprocess_signature(image2)

        # Update the CLIP threshold on the shared matcher
        signature_similarity.clip_threshold = clip_threshold

        # Get similarity results
        cropped1, cropped2, similarity_info = signature_similarity.process_images(
            image1, image2, match_threshold=match_threshold
        )

        if cropped1 is None or cropped2 is None:
            return None, "Error: Could not detect signatures in one or both images."

        # Convert crops to grayscale for feature matching (force RGB first in
        # case the crops come back single-channel)
        img1_cv = cv2.cvtColor(np.array(cropped1.convert("RGB")), cv2.COLOR_RGB2GRAY)
        img2_cv = cv2.cvtColor(np.array(cropped2.convert("RGB")), cv2.COLOR_RGB2GRAY)

        # Get the feature-match visualization
        matches_img = draw_feature_matches(img1_cv, img2_cv)

        if matches_img is None:
            # Fall back to a side-by-side comparison
            combined = Image.new(
                'RGB',
                (cropped1.width + cropped2.width, max(cropped1.height, cropped2.height))
            )
            combined.paste(cropped1, (0, 0))
            combined.paste(cropped2, (cropped1.width, 0))
            matches_img = np.array(combined)

        return matches_img, similarity_info

    except Exception as e:
        return None, f"Error processing images: {str(e)}"


# Create the Gradio interface
iface = gr.Interface(
    fn=process_images,
    inputs=[
        gr.Image(label="Reference Signature", type="numpy"),
        gr.Image(label="Test Signature", type="numpy"),
        gr.Slider(
            minimum=0.1,
            maximum=0.9,
            value=0.7,
            step=0.1,
            label="CLIP Similarity Threshold",
            info="Lower values are more lenient, higher values are stricter"
        ),
        gr.Slider(
            minimum=0.1,
            maximum=0.9,
            value=0.7,
            step=0.05,
            label="Match Threshold",
            info="Overall threshold for signature matching"
        )
    ],
    outputs=[
        gr.Image(label="Signature Comparison"),
        gr.Markdown(label="Results")
    ],
    title="Enhanced Signature Detection and Matching",
    description="""
    Upload two signature images to compare them. The system uses:
    - Grayscale conversion and Otsu binarization
    - CLIP-based semantic similarity
    - Shape and structural analysis
    - Feature point matching

    Higher similarity scores indicate better matches.
    """,
    examples=[
        # Add example image paths if you have them
    ]
)

# Launch the app
if __name__ == "__main__":
    iface.launch()
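
# To exercise the pipeline without the UI (a sketch; the file names below are
# placeholders, not assets shipped with this script):
#
#     from PIL import Image
#     comparison, info = process_images(Image.open("reference.png"), Image.open("test.png"))
#     print(info)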