import os
import pathlib
import platform

import gradio as gr
from fastai.vision.all import *
from ultralytics import YOLO
from PIL import Image, ImageDraw, ImageFont

# fastai learners pickled on one OS reference that OS's pathlib Path class;
# remap it so load_learner works on the current platform.
system_name = platform.system()
if system_name == 'Windows': pathlib.PosixPath = pathlib.WindowsPath
if system_name == 'Linux': pathlib.WindowsPath = pathlib.PosixPath

root = os.path.dirname(__file__)

# Load the pre-trained models: a YOLO face detector and a fastai
# facial-expression (mood) classifier, resolved relative to this script.
detect = YOLO(os.path.join(root, 'models', 'detect.pt'))
mood = load_learner(os.path.join(root, 'models', 'mood.pkl'))


# Detect faces in the input image, classify each face's mood, and return a
# copy of the image annotated with bounding boxes and predicted labels.
def process(imagep):
    boxes = detect.predict(imagep)
    image = Image.open(imagep)
    image_with_boxes = image.copy()

    draw = ImageDraw.Draw(image_with_boxes)

    for box in boxes[0].boxes.xyxy:
        x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])

        # Crop the face and match the classifier's expected input: 48x48 grayscale.
        cropped_image = image.crop((x1, y1, x2, y2))
        resized_image = cropped_image.resize((48, 48))
        grayscale_image = resized_image.convert('L')

        # Scale line width (and font size below) with the size of the box.
        w = 1 + (y2 + x2 - y1 - x1) // 50
        pil_image = PILImage.create(grayscale_image)
        prediction = mood.predict(pil_image)  # (label, label index, probabilities)
        text = prediction[0]
        text_position = (x1 + w, y1 + w)
        draw.rectangle([x1, y1, x2, y2], outline="red", width=w)
        font = ImageFont.truetype("opensans.ttf", 5 * w)
        draw.text(text_position, text, fill="blue", font=font, stroke_width=int(w * 0.2))

    return image_with_boxes



# Sample images offered in the Gradio examples strip
sample_images = ["./sample_images/angry.jpg", "./sample_images/office.jpg", "./sample_images/friends.jpg"]

iface = gr.Interface(
    fn=process,
    inputs=gr.Image(label="Select an image", type="filepath"),
    outputs='image',
    live=False,
    title="Face Mood Predictor",
    description="Upload a facial image or select one of the examples below.",
    examples=sample_images
)


iface.launch()
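
# Minimal local sanity check (a sketch, not part of the app): run the
# detection/classification pipeline directly on one of the bundled sample
# images and save the annotated result. Assumes that sample image exists and
# that iface.launch() above is skipped or interrupted first.
#
# annotated = process("./sample_images/angry.jpg")
# annotated.save("annotated_sample.jpg")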