from inference_sdk import InferenceHTTPClient
from config import Settings
from PIL import Image, ImageDraw


def draw_rectangle(image, x, y, width, height, fill=(255, 255, 255), **kwargs):
    """Paint a filled rectangle centered at (x, y) onto *image* in place.

    Args:
        image: PIL.Image to draw on (mutated in place).
        x, y: center coordinates of the rectangle, in pixels.
        width, height: rectangle dimensions, in pixels.
        fill: fill color; defaults to white, which masks the region.
        **kwargs: ignored — lets callers splat a whole prediction dict
            (e.g. a Roboflow prediction with extra keys like ``confidence``).

    Returns:
        The same image object, for convenient chaining.
    """
    draw = ImageDraw.Draw(image)
    # Convert center-based coordinates to the top-left corner.
    x1 = x - width // 2
    y1 = y - height // 2
    draw.rectangle(((x1, y1), (x1 + width, y1 + height)), fill=fill)
    return image


def crop_image(image, x, y, width, height, **kwargs):
    """Crop a ``width`` x ``height`` region centered at (x, y) from *image*.

    Args:
        image: PIL.Image to crop from (not modified — ``crop`` returns a copy).
        x, y: center coordinates of the region, in pixels.
        width, height: region dimensions, in pixels.
        **kwargs: ignored — lets callers splat a whole prediction dict.

    Returns:
        Tuple ``(cropped_image, left, top, width, height)`` where
        ``left``/``top`` are the crop's top-left corner in the source image.
    """
    left = x - width // 2
    top = y - height // 2
    right = left + width
    bottom = top + height
    cropped = image.crop((left, top, right, bottom))
    # right - left and bottom - top equal width/height by construction;
    # returned explicitly for the caller's convenience.
    return cropped, left, top, right - left, bottom - top


def DetectHandwritting(image):  # name (and spelling) kept for backward compatibility
    """Detect handwritten regions in *image* via a Roboflow-hosted YOLO model.

    Args:
        image: PIL.Image to run inference on.

    Returns:
        Tuple ``(masked_image, handwritten_parts)``:
        ``masked_image`` is a copy of *image* with every detected region
        painted white; ``handwritten_parts`` is a list of
        ``(crop, left, top, width, height)`` tuples, one per detection.
    """
    settings = Settings()
    client = InferenceHTTPClient(
        api_url=settings.ROBOFLOW_URL,
        api_key=settings.ROBOFLOW_API_KEY,
    )
    result = client.infer(image, model_id=settings.YOLO_MODEL_ID)

    masked = image.copy()
    handwritten_parts = []
    for prediction in result['predictions']:
        # Crop from the ORIGINAL image, not the masked copy: otherwise a
        # later crop that overlaps an earlier detection would contain the
        # earlier detection's white mask instead of the real pixels.
        handwritten_parts.append(crop_image(image, **prediction))
        masked = draw_rectangle(masked, **prediction)
    return masked, handwritten_parts