import io
import logging
import os

# Fetch the HuBERT/ContentVec feature-extractor checkpoint required by so-vits-svc.
os.system(
    "wget -P hubert/ https://huggingface.co/spaces/innnky/nanami/resolve/main/checkpoint_best_legacy_500.pt"
)

import gradio as gr
import librosa
import numpy as np
import soundfile

from inference.infer_tool import Svc

# Silence noisy third-party loggers.
logging.getLogger('numba').setLevel(logging.WARNING)
logging.getLogger('markdown_it').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('matplotlib').setLevel(logging.WARNING)

# Load the voice-conversion model together with its clustering checkpoint.
model = Svc(
    "logs/44k/G_199200.pth",
    "logs/44k/config.json",
    cluster_model_path="logs/44k/kmeans_10000.pt",
)


def predict(input_audio, not_singing):
    # The interface declares a single audio output, so errors are surfaced via
    # gr.Error instead of returning a (message, None) tuple.
    if input_audio is None:
        raise gr.Error("You need to upload an audio file.")

    sampling_rate, audio = input_audio
    duration = audio.shape[0] / sampling_rate
    if duration > 45:
        raise gr.Error("Please upload audio shorter than 45 seconds.")

    # Normalize integer PCM to float32 in [-1, 1], downmix to mono, and resample
    # to the 16 kHz expected by the feature extractor.
    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
    if len(audio.shape) > 1:
        audio = librosa.to_mono(audio.transpose(1, 0))
    if sampling_rate != 16000:
        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
    print(audio.shape)

    out_wav_path = "temp.wav"
    soundfile.write(out_wav_path, audio, 16000, format="wav")

    # "noice_scale" (sic) is the parameter name used by so-vits-svc's infer_tool.
    out_audio, out_sr = model.infer(
        "aimodel",
        0,
        out_wav_path,
        cluster_infer_ratio=0,
        auto_predict_f0=not_singing,
        noice_scale=0.4,
    )
    # Move the tensor to CPU before converting, in case inference ran on GPU.
    return (44100, out_audio.cpu().numpy())


audio_input = gr.Audio(label="Upload Audio")
not_singing = gr.Checkbox(label="Check this box if this audio is not singing", value=False)
audio_output = gr.Audio(label="Output Audio")

demo = gr.Interface(predict, inputs=[audio_input, not_singing], outputs=[audio_output])

# Alternative Blocks-based layout, kept for reference:
# app = gr.Blocks()
# with app:
#     audio_input = gr.Audio(label="Upload Audio")
#     not_singing = gr.Checkbox(label="Check this box if this audio is not singing", value=False)
#     audio_output = gr.Audio(label="Output Audio")
#     submit_btn = gr.Button("Submit", variant="primary")
#     submit_btn.click(predict, [audio_input, not_singing], [audio_output], api_name="predict")

demo.launch()
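
# --- Usage sketch (assumption, not part of the original app) ---
# While the demo is running, the endpoint can also be called programmatically from a
# separate script via gradio_client. The local URL and "sample.wav" path below are
# placeholders, and this assumes a gradio_client version compatible with the Gradio 3.x
# API used in this app; audio inputs are passed as file paths and the result is a path
# to the converted audio file.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860/")
# result = client.predict("sample.wav", False, api_name="/predict")
# print(result)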