# NOTE: "Spaces: / Sleeping / Sleeping" above this app's code was Hugging Face
# Space page chrome captured during extraction, not source code; replaced with
# this comment so the module parses.
import gradio as gr | |
import cv2 | |
import numpy as np | |
from PIL import Image | |
import os | |
# Locate the Haar Cascade used for face detection. Several candidate
# locations are probed in order (working dir, script dir, OpenCV's bundled
# data dir); the first classifier that loads cleanly wins. If none loads,
# face_cascade stays None and the app degrades to region-based effects.
face_cascade = None
cascade_paths = [
    "haarcascade_frontalface_default.xml",
    "./haarcascade_frontalface_default.xml",
    os.path.join(os.path.dirname(__file__), "haarcascade_frontalface_default.xml"),
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml",
]
for candidate in cascade_paths:
    if not os.path.exists(candidate):
        print(f"File not found: {candidate}")
        continue
    face_cascade = cv2.CascadeClassifier(candidate)
    if face_cascade.empty():
        # File existed but OpenCV could not parse it; keep probing.
        print(f"Failed to load Haar Cascade from: {candidate}")
        continue
    print(f"Successfully loaded Haar Cascade from: {candidate}")
    break
if face_cascade is None or face_cascade.empty():
    print("Warning: Could not load Haar Cascade classifier. Face detection will be disabled.")
    face_cascade = None
def _apply_effect(roi, effect_type, blur_ksize=(15, 15)):
    """Return *roi* (a BGR ndarray region) with *effect_type* applied.

    Args:
        roi: BGR image region (numpy array) to transform; not modified in place.
        effect_type: one of "blur", "sharpen", "grayscale", "pixelate";
            any other value returns the region unchanged.
        blur_ksize: Gaussian kernel for "blur" — callers use a stronger
            (35, 35) kernel when blurring whole detected faces.
    """
    if effect_type == "blur":
        return cv2.GaussianBlur(roi, blur_ksize, 0)
    if effect_type == "sharpen":
        kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
        return cv2.filter2D(roi, -1, kernel)
    if effect_type == "grayscale":
        # Convert to gray then back to 3 channels so the patch can be
        # written into the color image.
        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        return cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    if effect_type == "pixelate":
        h_roi, w_roi = roi.shape[:2]
        # max(1, ...) guards tiny regions where //10 would produce a
        # zero-sized target and make cv2.resize raise.
        small = cv2.resize(
            roi,
            (max(1, w_roi // 10), max(1, h_roi // 10)),
            interpolation=cv2.INTER_LINEAR,
        )
        return cv2.resize(small, (w_roi, h_roi), interpolation=cv2.INTER_NEAREST)
    return roi


def process_image(image, click_x, click_y, effect_type):
    """Apply *effect_type* to a region of *image* and return (PIL image, status).

    Target selection, in priority order:
      1. If click coordinates are given: the detected face nearest the click
         (within 100 px), else a 100x100 region centered on the click.
      2. Otherwise: every detected face (stronger blur kernel for faces).
      3. Otherwise: a centered region (face detection unavailable or no faces).

    Args:
        image: PIL image or None.
        click_x, click_y: pixel coordinates of the user's click, or None.
        effect_type: effect name passed through to _apply_effect().

    Returns:
        (processed PIL image, human-readable status string), or
        (None, message) when no image was supplied.
    """
    if image is None:
        return None, "Please upload an image first."

    img_np_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    processed = img_np_bgr.copy()

    # --- 1) Click-targeted application -----------------------------------
    if click_x is not None and click_y is not None:
        target_face = None
        if face_cascade is not None:
            gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
            try:
                all_faces = face_cascade.detectMultiScale(gray, 1.1, 4)
                min_distance = float("inf")
                for (fx, fy, fw, fh) in all_faces:
                    face_center_x = fx + fw // 2
                    face_center_y = fy + fh // 2
                    distance = np.sqrt(
                        (face_center_x - click_x) ** 2 + (face_center_y - click_y) ** 2
                    )
                    # Only accept faces within 100 pixels of the click.
                    if distance < min_distance and distance < 100:
                        min_distance = distance
                        target_face = (fx, fy, fw, fh)
            except Exception as e:
                print(f"Face detection error during click processing: {e}")

        if target_face is not None:
            x, y, w, h = target_face
            processed[y:y + h, x:x + w] = _apply_effect(
                processed[y:y + h, x:x + w], effect_type
            )
            status_message = f"Applied {effect_type} effect to detected face near click."
        else:
            # No face near the click: use a fixed-size region around it,
            # clamped to the image bounds.
            region_size = 100
            x1 = max(0, int(click_x - region_size // 2))
            y1 = max(0, int(click_y - region_size // 2))
            x2 = min(image.width, int(click_x + region_size // 2))
            y2 = min(image.height, int(click_y + region_size // 2))
            processed[y1:y2, x1:x2] = _apply_effect(processed[y1:y2, x1:x2], effect_type)
            status_message = f"Applied {effect_type} effect to clicked region."
        return (
            Image.fromarray(cv2.cvtColor(processed, cv2.COLOR_BGR2RGB)),
            status_message,
        )

    # --- 2) No click: apply to every detected face ------------------------
    faces = []
    if face_cascade is not None:
        gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
        try:
            faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        except Exception as e:
            print(f"Face detection error: {e}")
            faces = []

    if len(faces) > 0:
        for (x, y, w, h) in faces:
            # Whole-face blurring uses a stronger kernel than region blurring.
            processed[y:y + h, x:x + w] = _apply_effect(
                processed[y:y + h, x:x + w], effect_type, blur_ksize=(35, 35)
            )
        status_message = f"Applied {effect_type} effect to {len(faces)} detected face(s)."
    else:
        # --- 3) Fallback: center region ----------------------------------
        # BUG FIX: the original line here was garbled
        # ("roi = processed[...] = processed_roi" with both names unbound in
        # this branch, raising NameError and never applying the effect).
        # Reconstructed as: crop the center region, apply, write back.
        h, w = img_np_bgr.shape[:2]
        center_x, center_y = w // 2, h // 2
        region_size = min(200, w // 3, h // 3)
        x1 = max(0, center_x - region_size // 2)
        y1 = max(0, center_y - region_size // 2)
        x2 = min(w, center_x + region_size // 2)
        y2 = min(h, center_y + region_size // 2)
        processed[y1:y2, x1:x2] = _apply_effect(processed[y1:y2, x1:x2], effect_type)
        if face_cascade is None:
            status_message = f"Applied {effect_type} effect to center region (face detection unavailable)."
        else:
            status_message = f"No faces detected. Applied {effect_type} effect to center region."

    return Image.fromarray(cv2.cvtColor(processed, cv2.COLOR_BGR2RGB)), status_message
def detect_faces_only(image):
    """Run Haar-cascade detection on *image* and outline each face found.

    Returns a (PIL image, status string) pair. The input image is returned
    unmodified when no image was supplied, the cascade is unavailable, or
    detection raises.
    """
    if image is None:
        return None, "Please upload an image first."
    if face_cascade is None:
        return image, "Face detection is not available (Haar Cascade not loaded)."

    bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    try:
        detections = face_cascade.detectMultiScale(
            cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY), 1.1, 4
        )
    except Exception as e:
        return image, f"Face detection error: {str(e)}"

    # Outline every detection; (255, 0, 0) is blue in BGR channel order.
    for (left, top, width, height) in detections:
        cv2.rectangle(bgr, (left, top), (left + width, top + height), (255, 0, 0), 2)

    annotated = Image.fromarray(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
    return annotated, f"Detected {len(detections)} face(s)."
# Custom CSS for better styling; injected into the app via gr.Blocks(css=css)
# below. Styles the page font, the centered page header, and the blue-bordered
# instruction call-out box used by the intro HTML.
css = """
.gradio-container {
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
.main-header {
    text-align: center;
    color: #2c3e50;
    margin-bottom: 20px;
}
.instruction-text {
    background-color: #f8f9fa;
    padding: 15px;
    border-radius: 8px;
    border-left: 4px solid #007bff;
    margin-bottom: 20px;
}
"""
# Gradio interface.
# Two-column Blocks layout: uploader + effect controls on the left, processed
# output on the right. Click coordinates from the input image are cached in
# gr.State so process_image() can target the clicked region. The "Detect
# Faces" button only exists when the Haar cascade loaded at import time.
# NOTE(review): the emoji in the UI strings below are mojibake from a bad
# encoding pass in the original source; reproduced as-is to keep runtime
# strings unchanged — confirm intended emoji against the deployed app.
with gr.Blocks(css=css, title="AI Image Editor") as demo:
    gr.HTML("<h1 class='main-header'>π¨ AI Image Editor (CPU-friendly)</h1>")
    # Banner mirrors whether the module-level cascade probe succeeded.
    face_detection_status = "β Face detection enabled" if face_cascade is not None else "β οΈ Face detection disabled (Haar Cascade not found)"
    gr.HTML(f"""
    <div class='instruction-text'>
        <strong>Status:</strong> {face_detection_status}<br><br>
        <strong>Instructions:</strong>
        <ol>
            <li>Upload an image using the file uploader</li>
            <li>Click on the image to select a region (optional)</li>
            <li>Choose an effect from the dropdown menu</li>
            <li>Click "Apply Effect" to process the image</li>
            <li>If face detection is available, use "Detect Faces" to see detected faces</li>
        </ol>
        <em>Note: If you click on the image, the effect will be applied to the clicked region (prioritizing faces near the click). Otherwise, if face detection is available, effects will be applied to all detected faces. As a last resort, effects will be applied to the center region.</em>
    </div>
    """)
    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(
                type="pil",
                label="π Upload Image",
                interactive=True,
                height=400
            )
            with gr.Row():
                effect_dropdown = gr.Dropdown(
                    ["None", "blur", "sharpen", "grayscale", "pixelate"],
                    label="π Select Effect",
                    value="blur"
                )
            with gr.Row():
                process_button = gr.Button("β¨ Apply Effect", variant="primary", size="lg")
                # Button is only created when detection works; the event
                # wiring below is guarded by the same condition.
                if face_cascade is not None:
                    detect_button = gr.Button("π€ Detect Faces", variant="secondary", size="lg")
            status_text = gr.Textbox(
                label="π Status",
                interactive=False,
                placeholder="Ready to process..."
            )
        with gr.Column(scale=1):
            output_image = gr.Image(
                type="pil",
                label="πΌοΈ Processed Image",
                height=400
            )
    # Store click coordinates across events; None until the user clicks.
    clicked_x = gr.State(None)
    clicked_y = gr.State(None)

    def get_coords(evt: gr.SelectData):
        """Extract (x, y) pixel coordinates from an image select event.

        Returns (None, None) when the event carries no usable index.
        """
        if evt.index is not None and len(evt.index) == 2:
            return evt.index[0], evt.index[1]
        return None, None

    # Clicking the input image records the coordinates into the State pair.
    input_image.select(get_coords, None, [clicked_x, clicked_y])
    process_button.click(
        fn=process_image,
        inputs=[input_image, clicked_x, clicked_y, effect_dropdown],
        outputs=[output_image, status_text]
    )
    if face_cascade is not None:
        detect_button.click(
            fn=detect_faces_only,
            inputs=[input_image],
            outputs=[output_image, status_text]
        )
    gr.HTML("""
    <div style='text-align: center; margin-top: 20px; color: #6c757d;'>
        <p>Built with β€οΈ for CPU-friendly image processing | Powered by OpenCV & Gradio</p>
    </div>
    """)

if __name__ == "__main__":
    demo.launch()