Hbvsa committed on
Commit
0d7fc55
1 Parent(s): 266aca5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -4
app.py CHANGED
@@ -2,7 +2,6 @@ import subprocess
2
  import sys
3
  from os.path import abspath, dirname,join
4
  sys.path.append(join(dirname(abspath(__file__)),'GroundingDINO'))
5
- print(join(dirname(abspath(__file__)),'GroundingDINO'))
6
  def run_commands():
7
  commands = [
8
  "git clone https://github.com/IDEA-Research/GroundingDINO.git",
@@ -97,7 +96,7 @@ if __name__ == "__main__":
97
  logit, predicted = torch.max(output.data, 1)
98
  return self.labels[predicted[0].item()], logit[0].item()
99
 
100
-
101
  class VideoObjectDetection:
102
 
103
  def __init__(self,
@@ -167,6 +166,7 @@ if __name__ == "__main__":
167
  image_transformed, _ = transform(image_pillow, None)
168
  return image_transformed
169
 
 
170
  def generate_video(self, video_path) -> None:
171
 
172
  # Load model, set up variables and get video properties
@@ -218,7 +218,6 @@ if __name__ == "__main__":
218
  frame_count += 1
219
 
220
 
221
- @spaces.GPU(duration=200)
222
  def video_object_classification_pipeline():
223
  video_annotator = VideoObjectDetection(
224
  text_prompt='human face')
@@ -232,5 +231,4 @@ if __name__ == "__main__":
232
 
233
  iface.launch(share=False, debug=True)
234
 
235
- print("Só me falta a GPU")
236
  video_object_classification_pipeline()
 
2
  import sys
3
  from os.path import abspath, dirname,join
4
  sys.path.append(join(dirname(abspath(__file__)),'GroundingDINO'))
 
5
  def run_commands():
6
  commands = [
7
  "git clone https://github.com/IDEA-Research/GroundingDINO.git",
 
96
  logit, predicted = torch.max(output.data, 1)
97
  return self.labels[predicted[0].item()], logit[0].item()
98
 
99
+
100
  class VideoObjectDetection:
101
 
102
  def __init__(self,
 
166
  image_transformed, _ = transform(image_pillow, None)
167
  return image_transformed
168
 
169
+ @spaces.GPU(duration=30)
170
  def generate_video(self, video_path) -> None:
171
 
172
  # Load model, set up variables and get video properties
 
218
  frame_count += 1
219
 
220
 
 
221
  def video_object_classification_pipeline():
222
  video_annotator = VideoObjectDetection(
223
  text_prompt='human face')
 
231
 
232
  iface.launch(share=False, debug=True)
233
 
 
234
  video_object_classification_pipeline()