matheus-erthal
committed on
Commit
•
c91fae7
1
Parent(s):
baea08a
Commit inicial
Browse files- README.md +14 -0
- app.py +38 -0
- haarcascade_frontalface_alt2.xml +0 -0
- image_crop.py +59 -0
- requirements.txt +6 -0
- services/aws_service.py +28 -0
README.md
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
title: Autocrop Image
|
2 |
+
emoji: 🐨
|
3 |
+
colorFrom: indigo
|
4 |
+
colorTo: green
|
5 |
+
sdk: gradio
|
6 |
+
sdk_version: 3.44.4
|
7 |
+
app_file: app.py
|
8 |
+
pinned: false
|
9 |
+
|
10 |
+
```
|
11 |
+
sudo apt-get install python3-opencv
|
12 |
+
pip install -r requirements.txt
|
13 |
+
python3 app.py
|
14 |
+
```
|
app.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import gradio as gr
|
3 |
+
from services.aws_service import AwsService
|
4 |
+
from dotenv import load_dotenv
|
5 |
+
from image_crop import crop_faces
|
6 |
+
import cv2
|
7 |
+
import numpy as np
|
8 |
+
|
9 |
+
load_dotenv()
|
10 |
+
|
11 |
+
def crop_photoshoot_images(photo_shoot_id):
    """Crop faces from every input image of a photo shoot stored on S3.

    Downloads each image under PhotoShoots/<id>/Inputs, detects and crops
    faces, and uploads the crops to PhotoShoots/<id>/Croppeds.  Returns the
    literal string "done" so the Gradio UI has something to display.
    """
    bucket = os.environ.get('AWS_S3_BUCKET')
    input_folder = "PhotoShoots/" + str(photo_shoot_id) + "/Inputs"

    for entry in AwsService.get_files_from_s3(bucket, input_folder):
        s3_object = AwsService.get_image_from_s3(bucket, entry['Key'])
        pil_image = s3_object["pil"]
        base_name = s3_object["key"]

        # PIL decodes to RGB; reverse the channel axis for the BGR layout cv2 expects.
        bgr_image = np.array(pil_image)[:, :, ::-1].copy()

        for index, face in enumerate(crop_faces(bgr_image), start=1):
            local_name = f'{base_name}-{index}.jpg'
            # Round-trip through a temp file because upload_file wants a path.
            cv2.imwrite(local_name, face)
            AwsService.send_image_to_s3(
                local_name,
                bucket,
                f'PhotoShoots/{photo_shoot_id}/Croppeds/{base_name}-{index}.jpg',
            )
            os.remove(local_name)

    return "done"
|
31 |
+
|
32 |
+
# Minimal Gradio UI: a single text field for the photo-shoot ID; the handler
# returns a plain-text status ("done") once all crops are uploaded.
iface = gr.Interface(
    fn=crop_photoshoot_images,
    inputs=[gr.Textbox(lines=1, placeholder="Photo Shoot ID")],
    outputs=["text"]
)

iface.launch()
|
haarcascade_frontalface_alt2.xml
ADDED
File without changes
|
image_crop.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
|
3 |
+
def discover_square_crop_points(points, image, step=3, maximum_step=30):
    """Compute an expanded crop window around a detected face box.

    Starting from the detected box (x, y, w, h), a margin of
    ``((w + h) / 2) / step`` is added on every side.  If the expanded window
    falls outside the image, the function retries with ``step + 1`` (a
    smaller margin) until it fits or ``maximum_step`` is reached, at which
    point the raw detection box is returned unchanged.

    Args:
        points: dict with keys "x", "y", "w", "h" — the detected face box.
        image: image being cropped; only ``image.shape`` (height, width) is read.
        step: margin divisor; larger step means a tighter crop.
        maximum_step: recursion limit before giving up on any margin.

    Returns:
        dict with "initial_x", "initial_y", "final_x", "final_y" pixel
        coordinates (ints on the normal path).
    """
    x = points["x"]
    y = points["y"]
    w = points["w"]
    h = points["h"]

    # Give up on adding a margin: return the detected box as-is.
    # Fixed: final_y previously used x + h instead of y + h.
    if step >= maximum_step:
        return {"initial_x": x, "initial_y": y, "final_x": x + w, "final_y": y + h}

    img_height = image.shape[0]
    img_width = image.shape[1]

    # Margin shrinks as step grows, so the retries converge toward the raw box.
    crop_offset = ((w + h) / 2) / step

    initial_x = x - crop_offset
    initial_y = y - crop_offset
    final_x = x + w + crop_offset
    final_y = y + h + crop_offset

    # Window sticks out of the image: retry with a tighter margin.
    if initial_x < 0 or initial_y < 0 or final_x > img_width or final_y > img_height:
        return discover_square_crop_points(points, image, step + 1, maximum_step)

    return {"initial_x": int(initial_x), "initial_y": int(initial_y),
            "final_x": int(final_x), "final_y": int(final_y)}
|
33 |
+
|
34 |
+
def crop_faces(cv2_image):
    """Detect frontal faces in a BGR image and return expanded crops.

    Detection runs on a grayscale copy using OpenCV's bundled
    ``haarcascade_frontalface_alt2`` classifier; every detected box is
    widened via ``discover_square_crop_points`` before slicing.

    Returns a list of cropped image arrays (empty when nothing is found).
    """
    grayscale = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY)
    detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml')

    # Candidate scale factors, tried in order until one yields detections.
    scale_factors = [1.01]

    faces = ()
    for factor in scale_factors:
        faces = detector.detectMultiScale(image=grayscale, scaleFactor=factor, minNeighbors=4, minSize=(512, 512))
        if len(faces) > 0:
            break

    cropped = []
    for (x, y, w, h) in faces:
        window = discover_square_crop_points({"x": x, "y": y, "w": w, "h": h}, cv2_image)
        cropped.append(cv2_image[window["initial_y"]:window["final_y"],
                                 window["initial_x"]:window["final_x"]])

    return cropped
|
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio==3.44.4
|
2 |
+
markupsafe==2.0.1
|
3 |
+
boto3==1.26.25
|
4 |
+
botocore==1.29.25
|
5 |
+
opencv-python==4.8.1.78
|
6 |
+
python-dotenv==1.0.0
|
services/aws_service.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import boto3
|
3 |
+
from PIL import Image
|
4 |
+
from io import BytesIO
|
5 |
+
|
6 |
+
class AwsService:
    """Thin static wrapper around boto3 S3 for the photo-shoot workflow.

    Credentials and region are read from the environment on every call:
    AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION.
    """

    @staticmethod
    def session():
        """Build a boto3 Session from environment credentials."""
        return boto3.Session(
            aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'),
            aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'),
            region_name=os.environ.get('AWS_REGION')
        )

    @staticmethod
    def s3_client():
        """Return a fresh S3 client bound to a new session."""
        return AwsService.session().client('s3')

    @staticmethod
    def get_files_from_s3(bucket, prefix):
        """List the objects under ``prefix``; returns [] when none exist.

        Fixed: previously indexed ['Contents'] unconditionally, which raises
        KeyError for a prefix with no objects (boto3 omits the key then).
        """
        return AwsService.s3_client().list_objects(Bucket=bucket, Prefix=prefix).get('Contents', [])

    @staticmethod
    def get_image_from_s3(bucket, key):
        """Download an object and decode it as a PIL image.

        Returns {'key': <file name without extension>, 'pil': <PIL.Image>}.
        """
        file_byte_string = AwsService.s3_client().get_object(Bucket=bucket, Key=key)['Body'].read()
        return {
            'key': key.split('/')[-1].split('.')[0],
            'pil': Image.open(BytesIO(file_byte_string))
        }

    @staticmethod
    def send_image_to_s3(file, bucket, key):
        """Upload a local image file and make it publicly readable.

        NOTE(review): ContentType is hard-coded to image/png while the app
        uploads .jpg files — confirm whether it should be image/jpeg.
        """
        return AwsService.s3_client().upload_file(file, bucket, key, ExtraArgs={'ACL': 'public-read', 'ContentType': 'image/png'})
|