"""Gradio webcam demo: segment each frame with a fastai CamVid model and
overlay the pixels belonging to "banned" classes (cars, poles, signs, ...)
in green at 50% opacity."""
import gradio as gr
import torch
from fastai.vision.all import *
from PIL import ImageFilter, ImageEnhance
from diffusers.utils import make_image_grid
from tqdm import tqdm
from diffusers import AutoPipelineForInpainting, LCMScheduler, DDIMScheduler
from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel
import numpy as np
from PIL import Image

# Inference configuration: full precision, GPU when available.
preferred_dtype = torch.float32
preferred_device = "cuda" if torch.cuda.is_available() else "cpu"


def label_func(fn):
    """Map an image path to its CamVid label path.

    NOTE(review): `path` is an undefined global here. fastai pickles this
    function by reference, so it presumably only needs to *exist* for
    `load_learner` to unpickle the learner and is never called at inference
    time -- confirm against the notebook that produced camvid-256.pkl.
    """
    return path / "labels" / f"{fn.stem}_P{fn.suffix}"


# Pretrained fastai CamVid segmentation learner (256px input).
segmodel = load_learner("camvid-256.pkl")

# Class vocabulary in the order the model emits class indices.
seg_vocabulary = ['Animal', 'Archway', 'Bicyclist', 'Bridge', 'Building',
                  'Car', 'CartLuggagePram', 'Child', 'Column_Pole', 'Fence',
                  'LaneMkgsDriv', 'LaneMkgsNonDriv', 'Misc_Text',
                  'MotorcycleScooter', 'OtherMoving', 'ParkingBlock',
                  'Pedestrian', 'Road', 'RoadShoulder', 'Sidewalk',
                  'SignSymbol', 'Sky', 'SUVPickupTruck', 'TrafficCone',
                  'TrafficLight', 'Train', 'Tree', 'Truck_Bus', 'Tunnel',
                  'VegetationMisc', 'Void', 'Wall']

# Per-class binary flags, parallel to seg_vocabulary: 1 = highlight this class.
ban_cars_mask = np.array([0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1,
                          0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0],
                         dtype=np.uint8)


def get_seg_mask(img):
    """Run the segmentation model and return the per-pixel class-index mask
    (first element of fastai's predict tuple)."""
    return segmodel.predict(img)[0]


def display_mask(img, mask):
    """Blend a binary mask over `img` as a green overlay at 50% opacity.

    img:  PIL image or HxW(x3) uint8 array.
    mask: HxW array of 0/1 values.
    Returns a PIL.Image.
    """
    mask = np.asarray(mask)
    # Put the mask into the green channel of an otherwise-black RGB overlay.
    mask_rgb = np.stack([np.zeros_like(mask), mask, np.zeros_like(mask)],
                        axis=-1)
    # BUG FIX: the caller passes a PIL image (already resized), but
    # Image.fromarray() rejects PIL images -- only convert real arrays.
    if isinstance(img, Image.Image):
        img_pil = img.convert("RGB")
    else:
        img_pil = Image.fromarray(np.asarray(img)).convert("RGB")
    mask_pil = Image.fromarray((mask_rgb * 255).astype(np.uint8))
    # Image.blend requires both images to have the same mode and size.
    return Image.blend(img_pil, mask_pil, alpha=0.5)


def redact_image(img):
    """Gradio callback: segment one webcam frame and highlight banned classes."""
    # BUG FIX: gradio delivers a numpy array; ndarray.resize((256, 256)) is a
    # reshape, not an image resize. Convert to PIL first, then resize.
    img = Image.fromarray(np.asarray(img).astype(np.uint8)).convert("RGB")
    img = img.resize((256, 256))
    mask = get_seg_mask(img)
    # Index the per-class flag table by class id -> binary per-pixel mask.
    # np.asarray normalizes the fastai tensor mask for numpy fancy indexing.
    car_mask = ban_cars_mask[np.asarray(mask)]
    return display_mask(img, car_mask)


# BUG FIX: the original passed gr.Image(...) positionally AFTER the keyword
# argument fn=..., which is a SyntaxError. Use keyword arguments throughout.
iface = gr.Interface(
    fn=redact_image,
    inputs=gr.Image(sources=["webcam"], streaming=True),
    outputs="image",
    live=True,
)
iface.launch()