hugofloresgarcia committed on
Commit
e0d2d99
1 Parent(s): 87c54d2
Files changed (2) hide show
  1. app.py +19 -7
  2. requirements.txt +2 -0
app.py CHANGED
@@ -1,18 +1,30 @@
 
 
 
1
  from pathlib import Path
2
  import gradio as gr
3
- from pyharp import ModelCard, build_endpoint
 
4
 
5
  # Define the process function
 
6
def process_fn(input_audio, pitch_shift_amount):
    """Pitch-shift `input_audio` and return the path of the written result.

    Writes the processed signal to `_outputs/output.wav` and returns the
    signal's `path_to_file` attribute for the Gradio/pyharp endpoint.
    """
    # Lazy import so the heavy audiotools dependency loads only on first use.
    from audiotools import AudioSignal

    signal = AudioSignal(input_audio)
    signal.pitch_shift(pitch_shift_amount)

    # Ensure the output directory exists, then persist the shifted audio.
    destination = Path("_outputs")
    destination.mkdir(exist_ok=True)
    signal.write(destination / "output.wav")

    return signal.path_to_file
 
 
 
 
 
16
 
17
  # Create a ModelCard
18
  card = ModelCard(
 
1
+ import torch
2
+ import torchaudio
3
+
4
  from pathlib import Path
5
  import gradio as gr
6
+ import shutil
7
+ from pyharp import ModelCard, build_endpoint, save_and_return_filepath
8
 
9
  # Define the process function
10
@torch.inference_mode()
def process_fn(input_audio, pitch_shift_amount):
    """Pitch-shift `input_audio` by `pitch_shift_amount` semitones.

    Args:
        input_audio: audio input accepted by `audiotools.AudioSignal`
            (presumably a filepath from the Gradio widget — confirm at caller).
        pitch_shift_amount: semitone shift; may arrive as a `torch.Tensor`
            or a plain number from the Gradio slider.

    Returns:
        Filepath of the processed audio, as produced by pyharp's
        `save_and_return_filepath`.
    """
    # Lazy import so the heavy audiotools dependency loads only on first use.
    from audiotools import AudioSignal

    # torchaudio's PitchShift requires an integer n_steps. The slider value
    # may be a tensor OR a plain float, so normalize both cases to int
    # (the original only handled the tensor case).
    if isinstance(pitch_shift_amount, torch.Tensor):
        pitch_shift_amount = pitch_shift_amount.long().item()
    pitch_shift_amount = int(pitch_shift_amount)

    sig = AudioSignal(input_audio)

    ps = torchaudio.transforms.PitchShift(
        sig.sample_rate,
        n_steps=pitch_shift_amount,
        bins_per_octave=12,
        n_fft=512,
    )
    sig.audio_data = ps(sig.audio_data)

    return save_and_return_filepath(sig)
28
 
29
  # Create a ModelCard
30
  card = ModelCard(
requirements.txt CHANGED
@@ -1,2 +1,4 @@
 
 
1
  descript-audiotools
2
  -e git+https://github.com/audacitorch/pyharp.git#egg=pyharp
 
1
+ torch
2
+ torchaudio
3
  descript-audiotools
4
  -e git+https://github.com/audacitorch/pyharp.git#egg=pyharp