Blane187 John6666 committed on
Commit
6c308d9
1 Parent(s): 5d3a168

Upload 2 files (#8)

Browse files

- Upload 2 files (a8dd45ce81e2e148a54a1cd83c76a828faf0a881)


Co-authored-by: John Smith <John6666@users.noreply.huggingface.co>

Files changed (2) hide show
  1. app.py +162 -168
  2. dowoad_param.py +7 -5
app.py CHANGED
@@ -1,168 +1,162 @@
1
- import gradio as gr
2
- from rvc_infer import infer_audio
3
- import os
4
- import re
5
- import random
6
- from scipy.io.wavfile import write
7
- from scipy.io.wavfile import read
8
- import numpy as np
9
- import yt_dlp
10
- import subprocess
11
-
12
- print("downloading RVC models")
13
- os.system("python dowoad_param.py")
14
-
15
-
16
- BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17
-
18
- rvc_models_dir = os.path.join(BASE_DIR, 'models')
19
-
20
-
21
-
22
- def update_models_list():
23
- models_l = get_current_models(rvc_models_dir)
24
- return gr.update(choices=models_l)
25
-
26
-
27
-
28
- def extract_zip(extraction_folder, zip_name):
29
- os.makedirs(extraction_folder)
30
- with zipfile.ZipFile(zip_name, 'r') as zip_ref:
31
- zip_ref.extractall(extraction_folder)
32
- os.remove(zip_name)
33
-
34
- index_filepath, model_filepath = None, None
35
- for root, dirs, files in os.walk(extraction_folder):
36
- for name in files:
37
- if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
38
- index_filepath = os.path.join(root, name)
39
-
40
- if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
41
- model_filepath = os.path.join(root, name)
42
-
43
- if not model_filepath:
44
- raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
45
-
46
- # move model and index file to extraction folder
47
- os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
48
- if index_filepath:
49
- os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
50
-
51
- # remove any unnecessary nested folders
52
- for filepath in os.listdir(extraction_folder):
53
- if os.path.isdir(os.path.join(extraction_folder, filepath)):
54
- shutil.rmtree(os.path.join(extraction_folder, filepath))
55
-
56
-
57
-
58
- def download_online_model(url, dir_name, progress=gr.Progress()):
59
- try:
60
- progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
61
- zip_name = url.split('/')[-1]
62
- extraction_folder = os.path.join(rvc_models_dir, dir_name)
63
- if os.path.exists(extraction_folder):
64
- raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
65
-
66
- if 'pixeldrain.com' in url:
67
- url = f'https://pixeldrain.com/api/file/{zip_name}'
68
-
69
- urllib.request.urlretrieve(url, zip_name)
70
-
71
- progress(0.5, desc='[~] Extracting zip...')
72
- extract_zip(extraction_folder, zip_name)
73
- return f'[+] {dir_name} Model successfully downloaded!'
74
-
75
- except Exception as e:
76
- raise gr.Error(str(e))
77
-
78
- def download_audio(url):
79
- ydl_opts = {
80
- 'format': 'bestaudio/best',
81
- 'outtmpl': 'ytdl/%(title)s.%(ext)s',
82
- 'postprocessors': [{
83
- 'key': 'FFmpegExtractAudio',
84
- 'preferredcodec': 'wav',
85
- 'preferredquality': '192',
86
- }],
87
- }
88
-
89
- with yt_dlp.YoutubeDL(ydl_opts) as ydl:
90
- info_dict = ydl.extract_info(url, download=True)
91
- file_path = ydl.prepare_filename(info_dict).rsplit('.', 1)[0] + '.wav'
92
- sample_rate, audio_data = read(file_path)
93
- audio_array = np.asarray(audio_data, dtype=np.int16)
94
-
95
- return sample_rate, audio_array
96
-
97
-
98
- CSS = """
99
- """
100
-
101
- with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
102
- gr.Markdown("# RVC INFER DEMOS ")
103
- gr.Markdown(f"# recommended using colab version with more feature!<br> [![Open In Collab](https://img.shields.io/badge/google_colab-F9AB00?style=flat-square&logo=googlecolab&logoColor=white)](https://colab.research.google.com/drive/1bM1LB2__WNFxX8pyZmUPQZYq7dg58YWG?usp=sharing) ")
104
- with gr.Tab("Inferenece"):
105
- gr.Markdown("in progress")
106
- model_name = gr.Dropdown(model_name, label='Voice Models', info='Models folder "rvc_infer --> models". After new models are added into this folder, click the refresh button')
107
- ref_btn = gr.Button('Refresh Models', variant='primary')
108
- input_audio = gr.Audio(label="Input Audio", type="filepath")
109
- with gr.Accordion("Settings", open=False):
110
- f0_change = gr.Slider(label="f0 change", minimum=-12, maximum=12, step=1, value=0)
111
- f0_method = gr.Dropdown(label="f0 method", choices=["rmvpe+", "rmvpe", "fcpe", " hybrid[rmvpe+fcpe]"], value="rmvpe+")
112
- min_pitch = gr.Textbox(label="min pitch", lines=1, value="-12")
113
- max_pitch = gr.Textbox(label="max pitch", lines=1, value="12")
114
- crepe_hop_length = gr.Slider(label="crepe_hop_length", minimum=0, maximum=256, step=1, value=128)
115
- index_rate = gr.Slider(label="index_rate", minimum=0, maximum=1.0, step=0.01, value=0.75)
116
- filter_radius = gr.Slider(label="filter_radius", minimum=0, maximum=10.0, step=0.01, value=3)
117
- rms_mix_rate = gr.Slider(label="rms_mix_rate", minimum=0, maximum=1.0, step=0.01, value=0.25)
118
- protect = gr.Slider(label="protect", minimum=0, maximum=1.0, step=0.01, value=0.33)
119
- with gr.Accordion("Advanced Settings", open=False):
120
- split_infer = gr.Checkbox(label="split_infer", value=False)
121
- min_silence = gr.Slider(label="min_silence", minimum=0, maximum=1000, step=1, value=500)
122
- silence_threshold = gr.Slider(label="silence_threshold", minimum=-1000, maximum=1000, step=1, value=-50)
123
- seek_step = gr.Slider(label="seek_step", minimum=0, maximum=100, step=1, value=0)
124
- keep_silence = gr.Slider(label="keep_silence", minimum=-1000, maximum=1000, step=1, value=100)
125
- do_formant = gr.Checkbox(label="do_formant", value=False)
126
- quefrency = gr.Slider(label="quefrency", minimum=0, maximum=100, step=1, value=0)
127
- timbre = gr.Slider(label="timbre", minimum=0, maximum=100, step=1, value=1)
128
- f0_autotune = gr.Checkbox(label="f0_autotune", value=False)
129
- audio_format = gr.Dropdown(label="audio_format", choices=["wav"], value="wav", visible=False)
130
- resample_sr = gr.Slider(label="resample_sr", minimum=0, maximum=100, step=1, value=0)
131
- hubert_model_path = gr.Textbox(label="hubert_model_path", lines=1, value="hubert_base.pt", visible=False)
132
- rmvpe_model_path = gr.Textbox(label="rmvpe_model_path", lines=1, value="rmvpe.pt", visible=False)
133
- fcpe_model_path = gr.Textbox(label="fcpe_model_path", lines=1, value="fcpe.pt", visible=False)
134
- submit_inference = gr.Button('Inference', variant='primary')
135
- result_audio = gr.Audio("Output Audio")
136
-
137
- with gr.Tab("Download Model"):
138
- gr.Markdown("## Download Model for infernece")
139
- url_input = gr.Textbox(label="Model URL", placeholder="Enter the URL of the model")
140
- dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")
141
- output = gr.Textbox(label="Output Models")
142
- download_button = gr.Button("Download Model")
143
- download_button.click(download_online_model, inputs=[url_input, dir_name_input], outputs=output)
144
-
145
- with gr.Tab(" Credits"):
146
- gr.Markdown(
147
- """
148
- this project made by [Blane187](https://huggingface.co/Blane187) with Improvements by [John6666](https://huggingfce.co/John6666)
149
- """)
150
-
151
-
152
-
153
- ref_btn.click(update_models_list, None, outputs=rvc_model)
154
- gr.on(
155
- triggers=[submit_inference.click],
156
- fn=infer_audio,
157
- inputs=[model_name, input_audio, f0_change, f0_method, min_pitch, max_pitch, crepe_hop_length, index_rate,
158
- filter_radius, rms_mix_rate, protect, split_infer, min_silence, silence_threshold, seek_step,
159
- keep_silence, do_formant, quefrency, timbre, f0_autotune, audio_format, resample_sr,
160
- hubert_model_path, rmvpe_model_path, fcpe_model_path],
161
- outputs=[result_audio],
162
- queue=True,
163
- show_api=True,
164
- show_progress="full",
165
- )
166
-
167
- demo.queue()
168
- demo.launch(debug=True,share=True,show_api=False)
 
1
import os
import random
import re
import shutil
import subprocess
import urllib
import urllib.request  # required: `import urllib` alone does not expose the request submodule
import zipfile

import gradio as gr
import numpy as np
import yt_dlp
from scipy.io.wavfile import read
from scipy.io.wavfile import write

from rvc_infer import infer_audio, get_current_models
14
+
15
+ print("downloading RVC models")
16
+ os.system("python dowoad_param.py")
17
+
18
+ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
19
+
20
+ rvc_models_dir = os.path.join(BASE_DIR, 'models')
21
+
22
+ def update_models_list():
23
+ models_l = get_current_models(rvc_models_dir)
24
+ return gr.update(choices=models_l)
25
+
26
+ def extract_zip(extraction_folder, zip_name):
27
+ os.makedirs(extraction_folder)
28
+ with zipfile.ZipFile(zip_name, 'r') as zip_ref:
29
+ zip_ref.extractall(extraction_folder)
30
+ os.remove(zip_name)
31
+
32
+ index_filepath, model_filepath = None, None
33
+ for root, dirs, files in os.walk(extraction_folder):
34
+ for name in files:
35
+ if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
36
+ index_filepath = os.path.join(root, name)
37
+
38
+ if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
39
+ model_filepath = os.path.join(root, name)
40
+
41
+ if not model_filepath:
42
+ raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
43
+
44
+ # move model and index file to extraction folder
45
+ os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
46
+ if index_filepath:
47
+ os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
48
+
49
+ # remove any unnecessary nested folders
50
+ for filepath in os.listdir(extraction_folder):
51
+ if os.path.isdir(os.path.join(extraction_folder, filepath)):
52
+ shutil.rmtree(os.path.join(extraction_folder, filepath))
53
+
54
+ def download_online_model(url, dir_name, progress=gr.Progress()):
55
+ try:
56
+ progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
57
+ zip_name = url.split('/')[-1]
58
+ extraction_folder = os.path.join(rvc_models_dir, dir_name)
59
+ if os.path.exists(extraction_folder):
60
+ raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
61
+
62
+ if 'pixeldrain.com' in url:
63
+ url = f'https://pixeldrain.com/api/file/{zip_name}'
64
+
65
+ urllib.request.urlretrieve(url, zip_name)
66
+
67
+ progress(0.5, desc='[~] Extracting zip...')
68
+ extract_zip(extraction_folder, zip_name)
69
+ return f'[+] {dir_name} Model successfully downloaded!'
70
+
71
+ except Exception as e:
72
+ raise gr.Error(str(e))
73
+
74
+ def download_audio(url):
75
+ ydl_opts = {
76
+ 'format': 'bestaudio/best',
77
+ 'outtmpl': 'ytdl/%(title)s.%(ext)s',
78
+ 'postprocessors': [{
79
+ 'key': 'FFmpegExtractAudio',
80
+ 'preferredcodec': 'wav',
81
+ 'preferredquality': '192',
82
+ }],
83
+ }
84
+
85
+ with yt_dlp.YoutubeDL(ydl_opts) as ydl:
86
+ info_dict = ydl.extract_info(url, download=True)
87
+ file_path = ydl.prepare_filename(info_dict).rsplit('.', 1)[0] + '.wav'
88
+ sample_rate, audio_data = read(file_path)
89
+ audio_array = np.asarray(audio_data, dtype=np.int16)
90
+
91
+ return sample_rate, audio_array
92
+
93
+
94
+ CSS = """
95
+ """
96
+
97
+ with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
98
+ gr.Markdown("# RVC INFER DEMOS ")
99
+ gr.Markdown(f"# recommended using colab version with more feature!<br> [![Open In Collab](https://img.shields.io/badge/google_colab-F9AB00?style=flat-square&logo=googlecolab&logoColor=white)](https://colab.research.google.com/drive/1bM1LB2__WNFxX8pyZmUPQZYq7dg58YWG?usp=sharing) ")
100
+ with gr.Tab("Inferenece"):
101
+ gr.Markdown("in progress")
102
+ model_name = gr.Dropdown(label='Voice Models', info='Models folder "rvc_infer --> models". After new models are added into this folder, click the refresh button')
103
+ ref_btn = gr.Button('Refresh Models', variant='primary')
104
+ input_audio = gr.Audio(label="Input Audio", type="filepath")
105
+ with gr.Accordion("Settings", open=False):
106
+ f0_change = gr.Slider(label="f0 change", minimum=-12, maximum=12, step=1, value=0)
107
+ f0_method = gr.Dropdown(label="f0 method", choices=["rmvpe+", "rmvpe", "fcpe", " hybrid[rmvpe+fcpe]"], value="rmvpe+")
108
+ min_pitch = gr.Textbox(label="min pitch", lines=1, value="-12")
109
+ max_pitch = gr.Textbox(label="max pitch", lines=1, value="12")
110
+ crepe_hop_length = gr.Slider(label="crepe_hop_length", minimum=0, maximum=256, step=1, value=128)
111
+ index_rate = gr.Slider(label="index_rate", minimum=0, maximum=1.0, step=0.01, value=0.75)
112
+ filter_radius = gr.Slider(label="filter_radius", minimum=0, maximum=10.0, step=0.01, value=3)
113
+ rms_mix_rate = gr.Slider(label="rms_mix_rate", minimum=0, maximum=1.0, step=0.01, value=0.25)
114
+ protect = gr.Slider(label="protect", minimum=0, maximum=1.0, step=0.01, value=0.33)
115
+ with gr.Accordion("Advanced Settings", open=False):
116
+ split_infer = gr.Checkbox(label="split_infer", value=False)
117
+ min_silence = gr.Slider(label="min_silence", minimum=0, maximum=1000, step=1, value=500)
118
+ silence_threshold = gr.Slider(label="silence_threshold", minimum=-1000, maximum=1000, step=1, value=-50)
119
+ seek_step = gr.Slider(label="seek_step", minimum=0, maximum=100, step=1, value=0)
120
+ keep_silence = gr.Slider(label="keep_silence", minimum=-1000, maximum=1000, step=1, value=100)
121
+ do_formant = gr.Checkbox(label="do_formant", value=False)
122
+ quefrency = gr.Slider(label="quefrency", minimum=0, maximum=100, step=1, value=0)
123
+ timbre = gr.Slider(label="timbre", minimum=0, maximum=100, step=1, value=1)
124
+ f0_autotune = gr.Checkbox(label="f0_autotune", value=False)
125
+ audio_format = gr.Dropdown(label="audio_format", choices=["wav"], value="wav", visible=False)
126
+ resample_sr = gr.Slider(label="resample_sr", minimum=0, maximum=100, step=1, value=0)
127
+ hubert_model_path = gr.Textbox(label="hubert_model_path", lines=1, value="hubert_base.pt", visible=False)
128
+ rmvpe_model_path = gr.Textbox(label="rmvpe_model_path", lines=1, value="rmvpe.pt", visible=False)
129
+ fcpe_model_path = gr.Textbox(label="fcpe_model_path", lines=1, value="fcpe.pt", visible=False)
130
+ submit_inference = gr.Button('Inference', variant='primary')
131
+ result_audio = gr.Audio("Output Audio")
132
+
133
+ with gr.Tab("Download Model"):
134
+ gr.Markdown("## Download Model for infernece")
135
+ url_input = gr.Textbox(label="Model URL", placeholder="Enter the URL of the model")
136
+ dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")
137
+ output = gr.Textbox(label="Output Models")
138
+ download_button = gr.Button("Download Model")
139
+ download_button.click(download_online_model, inputs=[url_input, dir_name_input], outputs=output)
140
+
141
+ with gr.Tab(" Credits"):
142
+ gr.Markdown(
143
+ """
144
+ this project made by [Blane187](https://huggingface.co/Blane187) with Improvements by [John6666](https://huggingfce.co/John6666)
145
+ """)
146
+
147
+ ref_btn.click(update_models_list, None, outputs=model_name)
148
+ gr.on(
149
+ triggers=[submit_inference.click],
150
+ fn=infer_audio,
151
+ inputs=[model_name, input_audio, f0_change, f0_method, min_pitch, max_pitch, crepe_hop_length, index_rate,
152
+ filter_radius, rms_mix_rate, protect, split_infer, min_silence, silence_threshold, seek_step,
153
+ keep_silence, do_formant, quefrency, timbre, f0_autotune, audio_format, resample_sr,
154
+ hubert_model_path, rmvpe_model_path, fcpe_model_path],
155
+ outputs=[result_audio],
156
+ queue=True,
157
+ show_api=True,
158
+ show_progress="full",
159
+ )
160
+
161
+ demo.queue()
162
+ demo.launch(debug=True,share=True,show_api=False)
 
 
 
 
 
 
dowoad_param.py CHANGED
@@ -1,6 +1,8 @@
1
- def dowoad_param():
2
- os.system("wget https://huggingface.co/datasets/ylzz1997/rmvpe_pretrain_model/resolve/main/fcpe.pt -O fcpe.pt")
3
- os.system("wget https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/hubert_base.pt -O hubert_base.pt")
4
- os.system("wget https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/rmvpe.pt -O rmvpe.pt")
5
-
 
 
6
  dowoad_param()
 
1
+ import os
2
+
3
+ def dowoad_param():
4
+ os.system("wget https://huggingface.co/datasets/ylzz1997/rmvpe_pretrain_model/resolve/main/fcpe.pt -O fcpe.pt")
5
+ os.system("wget https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/hubert_base.pt -O hubert_base.pt")
6
+ os.system("wget https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/rmvpe.pt -O rmvpe.pt")
7
+
8
  dowoad_param()