ElenaRyumina committed
Commit 2db5685 • 1 Parent(s): 007e35f
Files changed (2)
  1. app.py +10 -1
  2. app/app_utils.py +10 -7
app.py CHANGED
@@ -53,7 +53,16 @@ with gr.Blocks(css="app.css") as demo:
    output_statistics = gr.Plot(label="Statistics of emotions", elem_classes="stat")
    gr.Examples(
        ["videos/video1.mp4",
-        "videos/video2.mp4"],
+        "videos/video2.mp4",
+        "new_videos/01.mp4",
+        "new_videos/02.mp4",
+        "new_videos/14.mp4",
+        "new_videos/16.mp4",
+        "new_videos/20.mp4",
+        "new_videos/36.mp4",
+        "new_videos/38.mp4",
+        "new_videos/45.mp4",
+        ],
        [input_video],
    )
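For context, the app.py change only extends the list passed to gr.Examples; each path becomes a clickable example that fills the video input. A minimal sketch of that pattern with a reduced, hypothetical layout (the Space's real Blocks also wires outputs and callbacks):

import gradio as gr

# Minimal sketch of the gr.Examples usage above; this layout is a
# hypothetical reduction, not the Space's full interface.
with gr.Blocks() as demo:
    input_video = gr.Video(label="Video")
    gr.Examples(
        examples=[
            "videos/video1.mp4",   # paths resolve relative to the repo root
            "new_videos/01.mp4",
        ],
        inputs=[input_video],      # clicking an example populates this component
    )

if __name__ == "__main__":
    demo.launch()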
 
app/app_utils.py CHANGED
@@ -44,11 +44,12 @@ def preprocess_image_and_predict(inp):
        startX, startY, endX, endY = get_box(fl, w, h)
        cur_face = inp[startY:endY, startX:endX]
        cur_face_n = pth_processing(Image.fromarray(cur_face))
-        prediction = (
-            torch.nn.functional.softmax(pth_model_static(cur_face_n), dim=1)
-            .detach()
-            .numpy()[0]
-        )
+        with torch.no_grad():
+            prediction = (
+                torch.nn.functional.softmax(pth_model_static(cur_face_n), dim=1)
+                .detach()
+                .numpy()[0]
+            )
        confidences = {DICT_EMO[i]: float(prediction[i]) for i in range(7)}
        grayscale_cam = cam(input_tensor=cur_face_n)
        grayscale_cam = grayscale_cam[0, :]
@@ -104,7 +105,8 @@ def preprocess_video_and_predict(video):

            if count_face%config_data.FRAME_DOWNSAMPLING == 0:
                cur_face_copy = pth_processing(Image.fromarray(cur_face))
-                features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).detach().numpy()
+                with torch.no_grad():
+                    features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).detach().numpy()

                grayscale_cam = cam(input_tensor=cur_face_copy)
                grayscale_cam = grayscale_cam[0, :]
@@ -120,7 +122,8 @@ def preprocess_video_and_predict(video):

            lstm_f = torch.from_numpy(np.vstack(lstm_features))
            lstm_f = torch.unsqueeze(lstm_f, 0)
-            output = pth_model_dynamic(lstm_f).detach().numpy()
+            with torch.no_grad():
+                output = pth_model_dynamic(lstm_f).detach().numpy()
            last_output = output

            if count_face == 0:
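All three app/app_utils.py hunks apply the same fix: these forward passes run for inference only, so wrapping them in torch.no_grad() stops autograd from recording the computation graph, which reduces memory use during repeated per-frame calls. A minimal sketch of the pattern with a hypothetical stand-in model (not the Space's actual pth_model_static / pth_model_dynamic):

import torch

# Hypothetical stand-in for the Space's emotion models.
model = torch.nn.Linear(512, 7)
model.eval()

frame_embedding = torch.randn(1, 512)

# Inside no_grad(), autograd records nothing, so activations that would
# only be needed for backprop are never retained.
with torch.no_grad():
    prediction = torch.nn.functional.softmax(model(frame_embedding), dim=1).numpy()[0]

One side note: under no_grad() the outputs already have requires_grad=False, so the .detach() calls kept in the diff become redundant, though they are harmless.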