hugofloresgarcia committed on
Commit
ba41108
β€’
1 Parent(s): e0d2d99
Files changed (3) hide show
  1. README.md +1 -1
  2. _outputs/output.wav +0 -0
  3. app.py +7 -4
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: πŸ†
4
  colorFrom: indigo
5
  colorTo: gray
6
  sdk: gradio
7
- sdk_version: 3.45.1
8
  app_file: app.py
9
  pinned: false
10
  license: mit
 
4
  colorFrom: indigo
5
  colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 4.7.1
8
  app_file: app.py
9
  pinned: false
10
  license: mit
_outputs/output.wav ADDED
Binary file (441 kB). View file
 
app.py CHANGED
@@ -8,13 +8,13 @@ from pyharp import ModelCard, build_endpoint, save_and_return_filepath
8
 
9
  # Define the process function
10
  @torch.inference_mode()
11
- def process_fn(input_audio, pitch_shift_amount):
12
  from audiotools import AudioSignal
13
 
14
  if isinstance(pitch_shift_amount, torch.Tensor):
15
  pitch_shift_amount = pitch_shift_amount.long().item()
16
 
17
- sig = AudioSignal(input_audio)
18
 
19
  ps = torchaudio.transforms.PitchShift(
20
  sig.sample_rate,
@@ -24,7 +24,9 @@ def process_fn(input_audio, pitch_shift_amount):
24
  )
25
  sig.audio_data = ps(sig.audio_data)
26
 
27
- return save_and_return_filepath(sig)
 
 
28
 
29
  # Create a ModelCard
30
  card = ModelCard(
@@ -57,6 +59,7 @@ with gr.Blocks() as demo:
57
  output = gr.Audio(label="Audio Output", type="filepath")
58
 
59
  # Build the endpoint
60
- ctrls_data, ctrls_button, process_button = build_endpoint(inputs, output, process_fn, card)
61
 
 
62
  demo.launch(share=True)
 
8
 
9
  # Define the process function
10
  @torch.inference_mode()
11
+ def process_fn(input_audio_path, pitch_shift_amount):
12
  from audiotools import AudioSignal
13
 
14
  if isinstance(pitch_shift_amount, torch.Tensor):
15
  pitch_shift_amount = pitch_shift_amount.long().item()
16
 
17
+ sig = AudioSignal(input_audio_path)
18
 
19
  ps = torchaudio.transforms.PitchShift(
20
  sig.sample_rate,
 
24
  )
25
  sig.audio_data = ps(sig.audio_data)
26
 
27
+ output_audio_path = save_and_return_filepath(sig)
28
+
29
+ return output_audio_path
30
 
31
  # Create a ModelCard
32
  card = ModelCard(
 
59
  output = gr.Audio(label="Audio Output", type="filepath")
60
 
61
  # Build the endpoint
62
+ widgets = build_endpoint(inputs, output, process_fn, card)
63
 
64
+ demo.queue()
65
  demo.launch(share=True)