fffiloni committed on
Commit
81b9dd6
1 Parent(s): 3a87a23

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -41,8 +41,8 @@ def infer():
41
 
42
  frames, _, _ = read_video(str(video_path), output_format="TCHW")
43
 
44
- img1= frames[100]
45
- img2 = frames[101]
46
 
47
 
48
 
@@ -50,15 +50,15 @@ def infer():
50
  transforms = weights.transforms()
51
 
52
 
53
- def preprocess(img1, img2):
54
- img1 = F.resize(img1, size=[520, 960])
55
- img2 = F.resize(img2, size=[520, 960])
56
- return transforms(img1, img2)
57
 
58
 
59
- img1, img2 = preprocess(img1, img2)
60
 
61
- print(f"shape = {img1.shape}, dtype = {img1.dtype}")
62
 
63
 
64
  ####################################
@@ -78,7 +78,7 @@ def infer():
78
  model = raft_large(weights=Raft_Large_Weights.DEFAULT, progress=False).to(device)
79
  model = model.eval()
80
 
81
- list_of_flows = model(img1.to(device), img2.to(device))
82
  print(f"type = {type(list_of_flows)}")
83
  print(f"length = {len(list_of_flows)} = number of iterations of the model")
84
 
 
41
 
42
  frames, _, _ = read_video(str(video_path), output_format="TCHW")
43
 
44
+ img1_batch = torch.stack([frames[100]])
45
+ img2_batch = torch.stack([frames[101]])
46
 
47
 
48
 
 
50
  transforms = weights.transforms()
51
 
52
 
53
+ def preprocess(img1_batch, img2_batch):
54
+ img1_batch = F.resize(img1_batch, size=[520, 960])
55
+ img2_batch = F.resize(img2_batch, size=[520, 960])
56
+ return transforms(img1_batch, img2_batch)
57
 
58
 
59
+ img1_batch, img2_batch = preprocess(img1_batch, img2_batch)
60
 
61
+ print(f"shape = {img1_batch.shape}, dtype = {img1_batch.dtype}")
62
 
63
 
64
  ####################################
 
78
  model = raft_large(weights=Raft_Large_Weights.DEFAULT, progress=False).to(device)
79
  model = model.eval()
80
 
81
+ list_of_flows = model(img1_batch.to(device), img2_batch.to(device))
82
  print(f"type = {type(list_of_flows)}")
83
  print(f"length = {len(list_of_flows)} = number of iterations of the model")
84