# Hugging Face Space: recommends an RVC pretrain model and epoch count
# based on the duration and sample rate of an uploaded audio file.
import wave

import gradio as gr
# Mapping of pretrain model names to the download URLs of their
# discriminator ("D") and generator ("G") checkpoint files.
# The suffix in each name (32k/40k/48k) is the target sample rate.
pretrains = {
    "OV2-32k": {
        "D": "https://huggingface.co/poiqazwsx/Ov2Super32kfix/resolve/main/f0Ov2Super32kD.pth",
        "G": "https://huggingface.co/poiqazwsx/Ov2Super32kfix/resolve/main/f0Ov2Super32kG.pth"
    },
    "OV2-40k": {
        "D": "https://huggingface.co/ORVC/Ov2Super/resolve/main/f0Ov2Super40kD.pth",
        "G": "https://huggingface.co/ORVC/Ov2Super/resolve/main/f0Ov2Super40kG.pth"
    },
    "RIN-40k": {
        "D": "https://huggingface.co/ORVC/RIN_E3/resolve/main/RIN_E3_D.pth",
        "G": "https://huggingface.co/ORVC/RIN_E3/resolve/main/RIN_E3_G.pth"
    },
    "Snowie-40k": {
        "D": "https://huggingface.co/ORVC/Snowie_RuPretrain_40k/resolve/main/D_Snowie_RuPretrain_EnP.pth",
        "G": "https://huggingface.co/ORVC/Snowie_RuPretrain_40k/resolve/main/G_Snowie_RuPretrain_EnP.pth"
    },
    "Snowie-48k": {
        "D": "https://huggingface.co/ORVC/Snowie_RuPretrain_48k/resolve/main/D_Snowie_Rupretrain_48k_V1.2.pth",
        "G": "https://huggingface.co/ORVC/Snowie_RuPretrain_48k/resolve/main/G_Snowie_Rupretrain_48k_V1.2.pth"
    },
    "SnowieV3.1-40k": {
        "D": "https://huggingface.co/MUSTAR/SnowieV3.1-40k/resolve/main/D_SnowieV3.1_40k.pth",
        "G": "https://huggingface.co/MUSTAR/SnowieV3.1-40k/resolve/main/G_SnowieV3.1_40k.pth"
    },
    "SnowieV3.1-32k": {
        "D": "https://huggingface.co/MUSTAR/SnowieV3.1-32k/resolve/main/D_SnowieV3.1_32k.pth",
        "G": "https://huggingface.co/MUSTAR/SnowieV3.1-32k/resolve/main/G_SnowieV3.1_32k.pth"
    },
    "SnowieV3.1-48k": {
        "D": "https://huggingface.co/MUSTAR/SnowieV3.1-48k/resolve/main/D_SnowieV3.1_48k.pth",
        "G": "https://huggingface.co/MUSTAR/SnowieV3.1-48k/resolve/main/G_SnowieV3.1_48k.pth"
    },
    "SnowieV3.1-X-RinE3-40k": {
        "D": "https://huggingface.co/MUSTAR/SnowieV3.1-X-RinE3-40K/resolve/main/D_Snowie-X-Rin_40k.pth",
        "G": "https://huggingface.co/MUSTAR/SnowieV3.1-X-RinE3-40K/resolve/main/G_Snowie-X-Rin_40k.pth"
    }
}
def get_audio_duration(audio_file):
    """Return the duration of a WAV file in seconds.

    Parameters
    ----------
    audio_file : str
        Path to a WAV file readable by the stdlib ``wave`` module.

    Returns
    -------
    float
        Audio length in seconds (frame count divided by frame rate).
    """
    # Context manager guarantees the handle is closed even if reading fails.
    with wave.open(audio_file, 'rb') as audio:
        frames = audio.getnframes()
        rate = audio.getframerate()
    return frames / float(rate)
def get_training_info(audio_file):
    """Recommend an epoch count and pretrain family for RVC training.

    Parameters
    ----------
    audio_file : str | None
        Path to the uploaded WAV file, or ``None`` when nothing was
        submitted from the UI.

    Returns
    -------
    str
        A markdown recommendation string, or an error message when the
        input is missing or its duration falls outside every known range.
    """
    if audio_file is None:
        return 'Please provide an audio file!'
    # Open the file once and read both values; the original implementation
    # opened it a second time just for the frame rate and leaked that handle.
    with wave.open(audio_file, 'rb') as audio:
        sample_rate = audio.getframerate()
        duration = audio.getnframes() / float(sample_rate)
    # (min, max) duration -> (recommended epochs, pretrain family).
    # NOTE(review): the ranges top out at 60, which reads like minutes, but
    # `duration` above is in seconds — confirm the intended unit.
    training_info = {
        (0, 2): (150, 'OV2'),
        (2, 3): (200, 'OV2'),
        (3, 5): (250, 'OV2'),
        (5, 10): (300, 'Normal'),
        (10, 25): (500, 'Normal'),
        (25, 45): (700, 'Normal'),
        (45, 60): (1000, 'Normal')
    }
    # for/else: the else branch runs only when no range matched.
    for (min_duration, max_duration), (epochs, pretrain) in training_info.items():
        if min_duration <= duration < max_duration:
            break
    else:
        return 'Duration is not within the specified range.'
    return f'You should use the **{pretrain}** pretrain with **{epochs}** epochs at **{sample_rate/1000}kHz** sample rate. Good luck with your training!'
# --- Gradio UI ------------------------------------------------------------
# One audio input, one text output; the button routes the uploaded file
# through get_training_info and shows the recommendation string.
with gr.Blocks() as demo:
    audio_p = gr.Audio(type="filepath", label="Your Audio here")
    audio_q = gr.Textbox(label="Your Output here")
    wtar = gr.Button("Start!")
    wtar.click(get_training_info, inputs=[audio_p], outputs=[audio_q])
demo.launch(debug=True)