asrs777 committed on
Commit b14dda0
1 Parent(s): d1c6f1f

Update app.py

Files changed (1):
  1. app.py +11 -2
app.py CHANGED
@@ -7,6 +7,7 @@ import torch
 from transformers import AutoModelForImageClassification
 from optimum.pipelines import pipeline
 from PIL import Image
+import numpy as np
 device = 1 if torch.cuda.is_available() else "cpu"
 
 chk_point = "kdhht2334/autotrain-diffusion-emotion-facial-expression-recognition-40429105176"
@@ -32,6 +33,9 @@ emotion_dict = {
 }
 
 
+output_img_size = (2100, 700)
+
+
 try:
     pipe = pipeline(
         "image-classification",
@@ -52,11 +56,16 @@ def face_detector(input_img):
     crop = img.crop(bbox)
     return crop
 
-
 def predict(image):
     cropped_face = face_detector(image)
+    face_w, face_h = cropped_face.size
+    print(face_h, face_w)
+    face_re_w = int(face_w * (700 / face_h))
+    resized_face = cropped_face.resize((face_re_w, 700))
+    output_img = Image.new("RGBA", output_img_size)
+    output_img.paste(resized_face, (1050 - int(face_re_w/2), 0))
     predictions = pipe(cropped_face)
-    return cropped_face, {p["label"]: p["score"] for p in predictions}
+    return output_img, {p["label"]: p["score"] for p in predictions}
 
 gr.Interface(
     predict,
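
The lines added to predict() change only how the result is displayed: the cropped face is scaled so its height fills the 700 px canvas, then pasted horizontally centered on the 2100 x 700 RGBA output_img, while classification still runs on the original cropped_face. A minimal sketch of that step in isolation, assuming the same canvas size; the helper name center_on_canvas is illustrative and not part of the commit:

from PIL import Image

CANVAS_W, CANVAS_H = 2100, 700  # same values as output_img_size in app.py

def center_on_canvas(face):
    # face: a PIL.Image face crop, as returned by face_detector()
    face_w, face_h = face.size
    # Scale the crop so its height fills the canvas while keeping the aspect ratio.
    new_w = int(face_w * (CANVAS_H / face_h))
    resized = face.resize((new_w, CANVAS_H))
    # Paste at x = CANVAS_W // 2 - new_w // 2 (i.e. 1050 - new_w/2), top-aligned,
    # so the face sits horizontally centered on a transparent canvas.
    canvas = Image.new("RGBA", (CANVAS_W, CANVAS_H))
    canvas.paste(resized, (CANVAS_W // 2 - new_w // 2, 0))
    return canvas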
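
The new return statement pairs output_img with the same score dictionary as before. The image-classification pipeline yields a list of {"label": ..., "score": ...} dicts, and the comprehension turns that into the {class: confidence} mapping that Gradio's Label component accepts (the Interface outputs themselves sit outside the diff context). A small illustration with placeholder predictions; the emotion names below are made up, the real labels come from the checkpoint:

# Illustrative only: sample_predictions stands in for the pipeline output.
sample_predictions = [
    {"label": "happy", "score": 0.91},    # placeholder labels and scores
    {"label": "neutral", "score": 0.06},
    {"label": "sad", "score": 0.03},
]
label_scores = {p["label"]: p["score"] for p in sample_predictions}
print(label_scores)  # {'happy': 0.91, 'neutral': 0.06, 'sad': 0.03}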