Junfeng5 committed on
Commit da8b065
1 Parent(s): f53375f

Update app.py

Files changed (1): app.py +3 -3
app.py CHANGED
@@ -116,7 +116,7 @@ pixel_mean = torch.Tensor( [123.675, 116.28, 103.53]).to(device).view(3, 1, 1)
 pixel_std = torch.Tensor([58.395, 57.12, 57.375]).to(device).view(3, 1, 1)
 normalizer = lambda x: (x - pixel_mean) / pixel_std
 inference_size = 800
-video_inference_size = 720
+video_inference_size = 640
 inference_type = 'resize_shot' # or LSJ
 size_divisibility = 32
 
@@ -414,11 +414,11 @@ def segment_video(video, prompt_mode, categoryname, custom_category, expressiong
     if model_selection == 'GLEE-Plus (SwinL)':
         GLEEmodel = GLEEmodel_swin
         print('use GLEE-Plus')
-        clip_length = 4 #batchsize
+        clip_length = 2 #batchsize
     else:
         GLEEmodel = GLEEmodel_r50
         print('use GLEE-Lite')
-        clip_length = 8 #batchsize
+        clip_length = 4 #batchsize
 
     # read video and get sparse frames
     cap = cv2.VideoCapture(video)
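For context on what the second hunk changes: per the in-line `#batchsize` comments, `clip_length` is the number of video frames run through the model per forward pass, and halving it (presumably to lower peak GPU memory on the demo host) means more, smaller passes over the same video. A minimal sketch of that chunking pattern, assuming `frames` is a list of preprocessed frames and `GLEEmodel` is callable on a clip; the `run_clips` helper is hypothetical, not app.py's actual API:

```python
# Hypothetical sketch of a clip_length-sized inference loop; `frames`
# and the GLEEmodel call signature are assumptions, not app.py's API.

def run_clips(frames, GLEEmodel, clip_length):
    """Run inference over a video in clips of `clip_length` frames."""
    outputs = []
    for start in range(0, len(frames), clip_length):
        clip = frames[start:start + clip_length]
        # Smaller clip_length -> smaller batch -> lower peak GPU memory,
        # at the cost of more forward passes over the same video.
        outputs.extend(GLEEmodel(clip))
    return outputs
```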