sunana committed on
Commit
342d9ff
1 Parent(s): 5dc1a2c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -27,14 +27,14 @@ def process_images(videos, x, y):
27
  images = images[:11]
28
  # transform images to a list of images tensor
29
  images = [torch.from_numpy(img).permute(2, 0, 1).float().to(device).unsqueeze(0) / 255.0 for img in images]
30
- # if the max size of the image is larger than 1024, resize the image to 1024 with same ratio
31
  max_size = max(images[0].shape[2], images[0].shape[3])
32
  if max_size > 768:
33
  ratio = 768 / max_size
34
  images = [torch.nn.functional.interpolate(img, scale_factor=ratio, mode='bicubic', align_corners=True) for img
35
  in images]
36
  # transform color image to gray image
37
-
38
  result = model.forward_viz(images, layer=7, x=x, y=y)
39
  flow = result['flow']
40
  attention = result['attention']
 
27
  images = images[:11]
28
  # transform images to a list of images tensor
29
  images = [torch.from_numpy(img).permute(2, 0, 1).float().to(device).unsqueeze(0) / 255.0 for img in images]
30
+ # if the max size of the image is larger than 1024, resize the image to 768 with same ratio
31
  max_size = max(images[0].shape[2], images[0].shape[3])
32
  if max_size > 768:
33
  ratio = 768 / max_size
34
  images = [torch.nn.functional.interpolate(img, scale_factor=ratio, mode='bicubic', align_corners=True) for img
35
  in images]
36
  # transform color image to gray image
37
+
38
  result = model.forward_viz(images, layer=7, x=x, y=y)
39
  flow = result['flow']
40
  attention = result['attention']