import gradio as gr
import librosa
from audio_style_transfer.models import timedomain


def audioStyleTransfer(content, style):
    print(style, content)
    output = "/tmp/outputfile.wav"
    # Use the content file's native sample rate for synthesis.
    sr = librosa.get_samplerate(content)
    timedomain.run(
        style_fname=style,
        content_fname=content,
        output_fname=output,
        n_fft=4096,
        n_filters=4096,
        hop_length=256,
        alpha=0.05,
        k_w=4,  # 1 to 3; lower is better quality but slower
        sr=sr,
    )
    print("output is", output)
    return output


iface = gr.Interface(
    fn=audioStyleTransfer,
    title="Time Domain Audio Style Transfer",
    description="Forked from https://github.com/pkmital/time-domain-neural-audio-style-transfer. "
                "Transfers the style of one audio clip onto another; it seems to work best on shorter clips.",
    inputs=[
        gr.Audio(source="upload", type="filepath", label="Content"),
        gr.Audio(source="upload", type="filepath", label="Style"),
    ],
    outputs=gr.Audio(label="Output"),
)
iface.launch()
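For quick testing without the Gradio UI, the function can also be called directly on two local audio files. This is a minimal sketch, not part of the Space itself; the file paths are hypothetical placeholders, and the stylized result is written to /tmp/outputfile.wav exactly as in the app above.

# Minimal local test sketch (assumes "content.wav" and "style.wav" exist; paths are examples only).
result_path = audioStyleTransfer("content.wav", "style.wav")
print(result_path)  # prints "/tmp/outputfile.wav", the rendered style-transfer output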