Jeremy Hummel committed
Commit f61edd1 · 1 Parent(s): c5b2d3d

Updates example caching

Files changed (2):
  1. app.py +2 -2
  2. visualize.py +1 -2
app.py CHANGED
@@ -70,8 +70,8 @@ article = \
 """
 
 examples = [
-    ["examples/Maple_leaf_rag_-_played_by_Scott_Joplin_1916_V2.ogg", network_choices[0], 1.0, 0.25, 0.5, 512, 45],
-    ["examples/Muriel-Nguyen-Xuan-Beethovens-Moonlight-Sonata-mvt-3.ogx", network_choices[4], 1.2, 0.3, 0.5, 384, 22],
+    ["examples/Maple_leaf_rag_-_played_by_Scott_Joplin_1916_V2.ogg", network_choices[0], 1.0, 0.25, 0.5, 512, 600, "example1.mp4"],
+    ["examples/Muriel-Nguyen-Xuan-Beethovens-Moonlight-Sonata-mvt-3.ogx", network_choices[4], 1.2, 0.3, 0.5, 384, 600, "example2.mp4"],
 ]
 
 demo = gr.Interface(
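Each example row changes its duration value to 600 and gains a trailing per-example output filename. The gr.Interface(...) call itself is cut off by this hunk, so the following is only a sketch of the usual Gradio wiring for cached examples: the stub visualize(), the component list, the placeholder network_choices, the truncation/tempo_sensitivity parameter names, and the cache_examples flag are assumptions, not code from this repo.

import gradio as gr

network_choices = ["network-a.pkl", "network-b.pkl"]  # placeholders; the real list lives in app.py


def visualize(audio_file, network, truncation, tempo_sensitivity,
              jitter, frame_length, duration, outfile="output.mp4"):
    # Stub standing in for visualize.visualize(); the real function renders a video to `outfile`.
    return outfile


examples = [
    # [audio_file, network, truncation, tempo_sensitivity, jitter, frame_length, duration, outfile]
    ["examples/Maple_leaf_rag_-_played_by_Scott_Joplin_1916_V2.ogg",
     network_choices[0], 1.0, 0.25, 0.5, 512, 600, "example1.mp4"],
]

demo = gr.Interface(
    fn=visualize,
    inputs=[
        gr.Audio(type="filepath"),             # audio_file
        gr.Dropdown(choices=network_choices),  # network
        gr.Number(value=1.0),                  # truncation (assumed name)
        gr.Number(value=0.25),                 # tempo_sensitivity
        gr.Number(value=0.5),                  # jitter
        gr.Number(value=512),                  # frame_length
        gr.Number(value=600),                  # duration
        gr.Textbox(value="output.mp4"),        # outfile, newly threaded through the signature
    ],
    outputs=gr.Video(),
    examples=examples,
    cache_examples=True,  # assumption: caching is what lets Gradio serve example1.mp4 / example2.mp4 directly
)

if __name__ == "__main__":
    demo.launch()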
visualize.py CHANGED
@@ -16,6 +16,7 @@ def visualize(audio_file,
               jitter,
               frame_length,
               duration,
+              outfile="output.mp4",
               ):
     print(audio_file)
 
@@ -56,8 +57,6 @@ def visualize(audio_file,
 
     tempo_sensitivity = tempo_sensitivity * frame_length / 512
 
-    outfile = "output.mp4"
-
     # Load pre-trained model
     device = torch.device('cuda')
     with dnnlib.util.open_url(network) as f:
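The hard-coded outfile local is removed and resurfaces as a keyword argument with the same default, so each call can direct its render to its own file (the example1.mp4 / example2.mp4 paths in app.py). A minimal usage sketch, assuming the leading parameter names (network, truncation, tempo_sensitivity) and the placeholder network path, since the hunk only shows the tail of the signature:

# Hypothetical direct call used to pre-render one cached example video.
from visualize import visualize

visualize(
    "examples/Maple_leaf_rag_-_played_by_Scott_Joplin_1916_V2.ogg",
    "stylegan3-network.pkl",   # placeholder for network_choices[0] in app.py
    1.0,                       # truncation (assumed name)
    0.25,                      # tempo_sensitivity
    0.5,                       # jitter
    frame_length=512,
    duration=600,
    outfile="example1.mp4",    # written here instead of the old hard-coded output.mp4
)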