import gradio as gr
import cv2
import numpy as np
from PIL import Image
import os

# Locate and load the Haar Cascade classifier for face detection.
# Several candidate paths are tried so the app works whether the XML file
# sits next to this script or only inside OpenCV's bundled data directory.
face_cascade = None
cascade_paths = [
    "haarcascade_frontalface_default.xml",
    "./haarcascade_frontalface_default.xml",
    os.path.join(os.path.dirname(__file__), "haarcascade_frontalface_default.xml"),
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml",
]
for path in cascade_paths:
    if os.path.exists(path):
        face_cascade = cv2.CascadeClassifier(path)
        if not face_cascade.empty():
            print(f"Successfully loaded Haar Cascade from: {path}")
            break
        else:
            print(f"Failed to load Haar Cascade from: {path}")
    else:
        print(f"File not found: {path}")

if face_cascade is None or face_cascade.empty():
    print("Warning: Could not load Haar Cascade classifier. Face detection will be disabled.")
    face_cascade = None


def _apply_effect(roi, effect_type, blur_ksize=(15, 15)):
    """Return a copy of *roi* (BGR ndarray) with *effect_type* applied.

    Supported effects: "blur", "sharpen", "grayscale", "pixelate"; any other
    value returns the ROI unchanged.  ``blur_ksize`` parameterizes the
    Gaussian kernel because the whole-face path uses a stronger (35, 35)
    blur than the clicked-region path's (15, 15).
    """
    # Empty ROIs (degenerate click regions at image edges) pass through
    # untouched — cv2 filters would raise on a zero-sized array.
    if roi.size == 0:
        return roi
    if effect_type == "blur":
        return cv2.GaussianBlur(roi, blur_ksize, 0)
    if effect_type == "sharpen":
        kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
        return cv2.filter2D(roi, -1, kernel)
    if effect_type == "grayscale":
        # Round-trip through GRAY keeps the ROI 3-channel so it can be
        # written back into the color image.
        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        return cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    if effect_type == "pixelate":
        h_roi, w_roi = roi.shape[:2]
        # max(1, ...) guards against a zero-sized resize target when the
        # ROI is narrower/shorter than 10 px (cv2.resize rejects size 0).
        temp = cv2.resize(
            roi,
            (max(1, w_roi // 10), max(1, h_roi // 10)),
            interpolation=cv2.INTER_LINEAR,
        )
        return cv2.resize(temp, (w_roi, h_roi), interpolation=cv2.INTER_NEAREST)
    # Unknown effect: no-op.
    return roi


def process_image(image, click_x, click_y, effect_type):
    """Apply *effect_type* to a region of *image* and return (PIL image, status).

    Region priority:
      1. The detected face nearest the click (within 100 px), if any.
      2. A fixed 100x100 region centered on the click.
      3. With no click: every detected face (stronger 35x35 blur kernel).
      4. With no click and no faces: a center region of the image.

    Parameters
    ----------
    image : PIL.Image or None
        Input image; ``None`` yields an error status.
    click_x, click_y : number or None
        Click coordinates in image pixels, or ``None`` if the user did not click.
    effect_type : str
        One of "blur", "sharpen", "grayscale", "pixelate".
    """
    if image is None:
        return None, "Please upload an image first."

    img_np = np.array(image)
    img_np_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
    processed_img_np_bgr = img_np_bgr.copy()
    status_message = ""
    applied_to_region = False

    # Prioritize clicked region if available.
    if click_x is not None and click_y is not None:
        # Try to find a face near the click.
        faces = []
        if face_cascade is not None:
            gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
            try:
                all_faces = face_cascade.detectMultiScale(gray, 1.1, 4)
                min_distance = float("inf")
                target_face = None
                for (fx, fy, fw, fh) in all_faces:
                    face_center_x = fx + fw // 2
                    face_center_y = fy + fh // 2
                    distance = np.sqrt(
                        (face_center_x - click_x) ** 2 + (face_center_y - click_y) ** 2
                    )
                    # Only accept faces within 100 pixels of the click.
                    if distance < min_distance and distance < 100:
                        min_distance = distance
                        target_face = (fx, fy, fw, fh)
                if target_face:
                    faces.append(target_face)
            except Exception as e:
                print(f"Face detection error during click processing: {e}")

        if len(faces) > 0:
            # Apply effect to the detected face near the click.
            x, y, w, h = faces[0]
            roi = processed_img_np_bgr[y:y + h, x:x + w]
            processed_img_np_bgr[y:y + h, x:x + w] = _apply_effect(roi, effect_type)
            status_message = f"Applied {effect_type} effect to detected face near click."
        else:
            # No face near the click: apply to a fixed-size region around it,
            # clamped to the image bounds.
            region_size = 100
            x1 = max(0, int(click_x - region_size // 2))
            y1 = max(0, int(click_y - region_size // 2))
            x2 = min(image.width, int(click_x + region_size // 2))
            y2 = min(image.height, int(click_y + region_size // 2))
            roi = processed_img_np_bgr[y1:y2, x1:x2]
            processed_img_np_bgr[y1:y2, x1:x2] = _apply_effect(roi, effect_type)
            status_message = f"Applied {effect_type} effect to clicked region."
        applied_to_region = True

    if not applied_to_region:
        # Fallback when there is no click: apply to all detected faces.
        faces = []
        if face_cascade is not None:
            gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
            try:
                faces = face_cascade.detectMultiScale(gray, 1.1, 4)
            except Exception as e:
                print(f"Face detection error: {e}")
                faces = []

        if len(faces) > 0:
            for (x, y, w, h) in faces:
                roi = processed_img_np_bgr[y:y + h, x:x + w]
                # Whole-face pass uses a stronger blur kernel than the
                # clicked-region pass (35x35 vs 15x15), matching the
                # original behavior.
                processed_img_np_bgr[y:y + h, x:x + w] = _apply_effect(
                    roi, effect_type, blur_ksize=(35, 35)
                )
            status_message = f"Applied {effect_type} effect to {len(faces)} detected face(s)."
        else:
            # Apply effect to a center region if no faces were detected and
            # there was no click.  BUG FIX: the original corrupted this branch
            # ("roi = ...[y1:y2, x1:x1+roi.shape[1]] = processed_roi"), using
            # `roi` before assignment and a `processed_roi` that was never
            # computed here — it raised instead of applying the effect.
            h, w = img_np_bgr.shape[:2]
            center_x, center_y = w // 2, h // 2
            region_size = min(200, w // 3, h // 3)
            x1 = max(0, center_x - region_size // 2)
            y1 = max(0, center_y - region_size // 2)
            x2 = min(w, center_x + region_size // 2)
            y2 = min(h, center_y + region_size // 2)
            roi = processed_img_np_bgr[y1:y2, x1:x2]
            processed_img_np_bgr[y1:y2, x1:x2] = _apply_effect(roi, effect_type)
            if face_cascade is None:
                status_message = f"Applied {effect_type} effect to center region (face detection unavailable)."
            else:
                status_message = f"No faces detected. Applied {effect_type} effect to center region."

    img_pil = Image.fromarray(cv2.cvtColor(processed_img_np_bgr, cv2.COLOR_BGR2RGB))
    return img_pil, status_message


def detect_faces_only(image):
    """Draw rectangles around every detected face and return (image, status).

    Returns the image unmodified (with an explanatory status) when no image
    was supplied, when the cascade failed to load, or when detection raises.
    """
    if image is None:
        return None, "Please upload an image first."
    if face_cascade is None:
        return image, "Face detection is not available (Haar Cascade not loaded)."

    img_np = np.array(image)
    img_np_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
    try:
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    except Exception as e:
        return image, f"Face detection error: {str(e)}"

    # Draw rectangles around detected faces.  Color is (255, 0, 0) in BGR
    # order, i.e. blue boxes on the rendered RGB output? NOTE(review): BGR
    # (255, 0, 0) becomes blue after the BGR->RGB conversion below.
    for (x, y, w, h) in faces:
        cv2.rectangle(img_np_bgr, (x, y), (x + w, y + h), (255, 0, 0), 2)

    img_pil = Image.fromarray(cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2RGB))
    return img_pil, f"Detected {len(faces)} face(s)."
# Custom CSS for better styling css = """ .gradio-container { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; } .main-header { text-align: center; color: #2c3e50; margin-bottom: 20px; } .instruction-text { background-color: #f8f9fa; padding: 15px; border-radius: 8px; border-left: 4px solid #007bff; margin-bottom: 20px; } """ # Gradio interface with gr.Blocks(css=css, title="AI Image Editor") as demo: gr.HTML("
Built with ❤️ for CPU-friendly image processing | Powered by OpenCV & Gradio