R-Kentaren committed on
Commit
61c95eb
·
verified ·
1 Parent(s): 5cf4844

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .env +49 -0
  2. .gitattributes +27 -60
  3. .gitignore +28 -0
  4. CONTRIBUTING.md +11 -0
  5. Dockerfile +46 -0
  6. Dwayne_cut_2.mp3 +3 -0
  7. LICENSE +23 -0
  8. MIT协议暨相关引用库协议 +45 -0
  9. README.md +1 -0
  10. __pycache__/easy_sync.cpython-311.pyc +0 -0
  11. a.png +3 -0
  12. assets/Synthesizer_inputs.pth +3 -0
  13. assets/hubert/.gitignore +3 -0
  14. assets/hubert/hubert_inputs.pth +3 -0
  15. assets/indices/.gitignore +2 -0
  16. assets/pretrained/.gitignore +2 -0
  17. assets/pretrained_v2/.gitignore +2 -0
  18. assets/pretrained_v2/f0D32k.pth +3 -0
  19. assets/pretrained_v2/f0G32k.pth +3 -0
  20. assets/pretrained_v2/f0Ov2Super32kD.pth +3 -0
  21. assets/pretrained_v2/f0Ov2Super32kG.pth +3 -0
  22. assets/rmvpe/.gitignore +3 -0
  23. assets/rmvpe/rmvpe_inputs.pth +3 -0
  24. assets/uvr5_weights/.gitignore +2 -0
  25. audio.mp3 +3 -0
  26. audios/astronauts.mp3 +3 -0
  27. audios/output_audio.wav +3 -0
  28. audios/somegirl.mp3 +3 -0
  29. audios/someguy.mp3 +3 -0
  30. audios/unachica.mp3 +3 -0
  31. audios/unchico.mp3 +3 -0
  32. configs/__pycache__/config.cpython-311.pyc +0 -0
  33. configs/config.json +1 -0
  34. configs/config.py +259 -0
  35. configs/inuse/.gitignore +4 -0
  36. configs/inuse/v1/.gitignore +2 -0
  37. configs/inuse/v1/32k.json +46 -0
  38. configs/inuse/v1/40k.json +46 -0
  39. configs/inuse/v1/48k.json +46 -0
  40. configs/inuse/v2/.gitignore +2 -0
  41. configs/inuse/v2/32k.json +46 -0
  42. configs/inuse/v2/48k.json +46 -0
  43. configs/v1/32k.json +46 -0
  44. configs/v1/40k.json +46 -0
  45. configs/v1/48k.json +46 -0
  46. configs/v2/32k.json +46 -0
  47. configs/v2/48k.json +46 -0
  48. demo.py +439 -0
  49. docker-compose.yml +20 -0
  50. docs/cn/Changelog_CN.md +109 -0
.env ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ OPENBLAS_NUM_THREADS = 1
2
+ no_proxy = localhost, 127.0.0.1, ::1
3
+
4
+ # You can change the location of the model, etc. by changing here
5
+ weight_root = assets/weights
6
+ weight_uvr5_root = assets/uvr5_weights
7
+ index_root = logs
8
+ outside_index_root = assets/indices
9
+ rmvpe_root = assets/rmvpe
10
+
11
+ sha256_hubert_base_pt = f54b40fd2802423a5643779c4861af1e9ee9c1564dc9d32f54f20b5ffba7db96
12
+ sha256_rmvpe_pt = 6d62215f4306e3ca278246188607209f09af3dc77ed4232efdd069798c4ec193
13
+ sha256_rmvpe_onnx = 5370e71ac80af8b4b7c793d27efd51fd8bf962de3a7ede0766dac0befa3660fd
14
+
15
+ sha256_v1_D32k_pth = 2ab20645829460fdad0d3c44254f1ab53c32cae50c22a66c926ae5aa30abda6f
16
+ sha256_v1_D40k_pth = 547f66dbbcd9023b9051ed244d12ab043ba8a4e854b154cc28761ac7c002909b
17
+ sha256_v1_D48k_pth = 8cc013fa60ed9c3f902f5bd99f48c7e3b9352d763d4d3cd6bc241c37b0bfd9ad
18
+ sha256_v1_G32k_pth = 81817645cde7ed2e2d83f23ef883f33dda564924b497e84d792743912eca4c23
19
+ sha256_v1_G40k_pth = e428573bda1124b0ae0ae843fd8dcded6027d3993444790b3e9b0100938b2113
20
+ sha256_v1_G48k_pth = 3862a67ea6313e8ffefc05cee6bee656ef3e089442e9ecf4a6618d60721f3e95
21
+ sha256_v1_f0D32k_pth = 294db3087236e2c75260d6179056791c9231245daf5d0485545d9e54c4057c77
22
+ sha256_v1_f0D40k_pth = 7d4f5a441594b470d67579958b2fd4c6b992852ded28ff9e72eda67abcebe423
23
+ sha256_v1_f0D48k_pth = 1b84c8bf347ad1e539c842e8f2a4c36ecd9e7fb23c16041189e4877e9b07925c
24
+ sha256_v1_f0G32k_pth = 285f524bf48bb692c76ad7bd0bc654c12bd9e5edeb784dddf7f61a789a608574
25
+ sha256_v1_f0G40k_pth = 9115654aeef1995f7dd3c6fc4140bebbef0ca9760bed798105a2380a34299831
26
+ sha256_v1_f0G48k_pth = 78bc9cab27e34bcfc194f93029374d871d8b3e663ddedea32a9709e894cc8fe8
27
+
28
+ sha256_v2_D32k_pth = d8043378cc6619083d385f5a045de09b83fb3bf8de45c433ca863b71723ac3ca
29
+ sha256_v2_D40k_pth = 471378e894e7191f89a94eda8288c5947b16bbe0b10c3f1f17efdb7a1d998242
30
+ sha256_v2_D48k_pth = db01094a93c09868a278e03dafe8bb781bfcc1a5ba8df168c948bf9168c84d82
31
+ sha256_v2_G32k_pth = 869b26a47f75168d6126f64ac39e6de5247017a8658cfd68aca600f7323efb9f
32
+ sha256_v2_G40k_pth = a3843da7fde33db1dab176146c70d6c2df06eafe9457f4e3aa10024e9c6a4b69
33
+ sha256_v2_G48k_pth = 2e2b1581a436d07a76b10b9d38765f64aa02836dc65c7dee1ce4140c11ea158b
34
+ sha256_v2_f0D32k_pth = bd7134e7793674c85474d5145d2d982e3c5d8124fc7bb6c20f710ed65808fa8a
35
+ sha256_v2_f0D40k_pth = 6b6ab091e70801b28e3f41f335f2fc5f3f35c75b39ae2628d419644ec2b0fa09
36
+ sha256_v2_f0D48k_pth = 2269b73c7a4cf34da09aea99274dabf99b2ddb8a42cbfb065fb3c0aa9a2fc748
37
+ sha256_v2_f0G32k_pth = 2332611297b8d88c7436de8f17ef5f07a2119353e962cd93cda5806d59a1133d
38
+ sha256_v2_f0G40k_pth = 3b2c44035e782c4b14ddc0bede9e2f4a724d025cd073f736d4f43708453adfcb
39
+ sha256_v2_f0G48k_pth = b5d51f589cc3632d4eae36a315b4179397695042edc01d15312e1bddc2b764a4
40
+
41
+ sha256_uvr5_HP2-人声vocals+非人声instrumentals_pth = 39796caa5db18d7f9382d8ac997ac967bfd85f7761014bb807d2543cc844ef05
42
+ sha256_uvr5_HP2_all_vocals_pth = 39796caa5db18d7f9382d8ac997ac967bfd85f7761014bb807d2543cc844ef05
43
+ sha256_uvr5_HP3_all_vocals_pth = 45e6b65199e781b4a6542002699be9f19cd3d1cb7d1558bc2bfbcd84674dfe28
44
+ sha256_uvr5_HP5-主旋律人声vocals+其他instrumentals_pth = 5908891829634926119720241e8573d97cbeb8277110a7512bdb0bd7563258ee
45
+ sha256_uvr5_HP5_only_main_vocal_pth = 5908891829634926119720241e8573d97cbeb8277110a7512bdb0bd7563258ee
46
+ sha256_uvr5_VR-DeEchoAggressive_pth = 8c8fd1582f9aabc363e47af62ddb88df6cae7e064cae75bbf041a067a5e0aee2
47
+ sha256_uvr5_VR-DeEchoDeReverb_pth = 01376dd2a571bf3cb9cced680732726d2d732609d09216a610b0d110f133febe
48
+ sha256_uvr5_VR-DeEchoNormal_pth = 56aba59db3bcdd14a14464e62f3129698ecdea62eee0f003b9360923eb3ac79e
49
+ sha256_uvr5_vocals_onnx = 233bb5c6aaa365e568659a0a81211746fa881f8f47f82d9e864fce1f7692db80
.gitattributes CHANGED
@@ -1,60 +1,27 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.avro filter=lfs diff=lfs merge=lfs -text
4
- *.bin filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ckpt filter=lfs diff=lfs merge=lfs -text
7
- *.ftz filter=lfs diff=lfs merge=lfs -text
8
- *.gz filter=lfs diff=lfs merge=lfs -text
9
- *.h5 filter=lfs diff=lfs merge=lfs -text
10
- *.joblib filter=lfs diff=lfs merge=lfs -text
11
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
- *.lz4 filter=lfs diff=lfs merge=lfs -text
13
- *.mds filter=lfs diff=lfs merge=lfs -text
14
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
15
- *.model filter=lfs diff=lfs merge=lfs -text
16
- *.msgpack filter=lfs diff=lfs merge=lfs -text
17
- *.npy filter=lfs diff=lfs merge=lfs -text
18
- *.npz filter=lfs diff=lfs merge=lfs -text
19
- *.onnx filter=lfs diff=lfs merge=lfs -text
20
- *.ot filter=lfs diff=lfs merge=lfs -text
21
- *.parquet filter=lfs diff=lfs merge=lfs -text
22
- *.pb filter=lfs diff=lfs merge=lfs -text
23
- *.pickle filter=lfs diff=lfs merge=lfs -text
24
- *.pkl filter=lfs diff=lfs merge=lfs -text
25
- *.pt filter=lfs diff=lfs merge=lfs -text
26
- *.pth filter=lfs diff=lfs merge=lfs -text
27
- *.rar filter=lfs diff=lfs merge=lfs -text
28
- *.safetensors filter=lfs diff=lfs merge=lfs -text
29
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
30
- *.tar.* filter=lfs diff=lfs merge=lfs -text
31
- *.tar filter=lfs diff=lfs merge=lfs -text
32
- *.tflite filter=lfs diff=lfs merge=lfs -text
33
- *.tgz filter=lfs diff=lfs merge=lfs -text
34
- *.wasm filter=lfs diff=lfs merge=lfs -text
35
- *.xz filter=lfs diff=lfs merge=lfs -text
36
- *.zip filter=lfs diff=lfs merge=lfs -text
37
- *.zst filter=lfs diff=lfs merge=lfs -text
38
- *tfevents* filter=lfs diff=lfs merge=lfs -text
39
- # Audio files - uncompressed
40
- *.pcm filter=lfs diff=lfs merge=lfs -text
41
- *.sam filter=lfs diff=lfs merge=lfs -text
42
- *.raw filter=lfs diff=lfs merge=lfs -text
43
- # Audio files - compressed
44
- *.aac filter=lfs diff=lfs merge=lfs -text
45
- *.flac filter=lfs diff=lfs merge=lfs -text
46
- *.mp3 filter=lfs diff=lfs merge=lfs -text
47
- *.ogg filter=lfs diff=lfs merge=lfs -text
48
- *.wav filter=lfs diff=lfs merge=lfs -text
49
- # Image files - uncompressed
50
- *.bmp filter=lfs diff=lfs merge=lfs -text
51
- *.gif filter=lfs diff=lfs merge=lfs -text
52
- *.png filter=lfs diff=lfs merge=lfs -text
53
- *.tiff filter=lfs diff=lfs merge=lfs -text
54
- # Image files - compressed
55
- *.jpg filter=lfs diff=lfs merge=lfs -text
56
- *.jpeg filter=lfs diff=lfs merge=lfs -text
57
- *.webp filter=lfs diff=lfs merge=lfs -text
58
- # Video files - compressed
59
- *.mp4 filter=lfs diff=lfs merge=lfs -text
60
- *.webm filter=lfs diff=lfs merge=lfs -text
 
1
+ # Auto detect text files and perform LF normalization
2
+ * text=auto
3
+ Dwayne_cut_2.mp3 filter=lfs diff=lfs merge=lfs -text
4
+ a.png filter=lfs diff=lfs merge=lfs -text
5
+ assets/Synthesizer_inputs.pth filter=lfs diff=lfs merge=lfs -text
6
+ assets/hubert/hubert_inputs.pth filter=lfs diff=lfs merge=lfs -text
7
+ assets/pretrained_v2/f0D32k.pth filter=lfs diff=lfs merge=lfs -text
8
+ assets/pretrained_v2/f0G32k.pth filter=lfs diff=lfs merge=lfs -text
9
+ assets/pretrained_v2/f0Ov2Super32kD.pth filter=lfs diff=lfs merge=lfs -text
10
+ assets/pretrained_v2/f0Ov2Super32kG.pth filter=lfs diff=lfs merge=lfs -text
11
+ assets/rmvpe/rmvpe_inputs.pth filter=lfs diff=lfs merge=lfs -text
12
+ audio.mp3 filter=lfs diff=lfs merge=lfs -text
13
+ audios/astronauts.mp3 filter=lfs diff=lfs merge=lfs -text
14
+ audios/output_audio.wav filter=lfs diff=lfs merge=lfs -text
15
+ audios/somegirl.mp3 filter=lfs diff=lfs merge=lfs -text
16
+ audios/someguy.mp3 filter=lfs diff=lfs merge=lfs -text
17
+ audios/unachica.mp3 filter=lfs diff=lfs merge=lfs -text
18
+ audios/unchico.mp3 filter=lfs diff=lfs merge=lfs -text
19
+ logs/mute/0_gt_wavs/mute32k.spec.pt filter=lfs diff=lfs merge=lfs -text
20
+ logs/mute/0_gt_wavs/mute32k.wav filter=lfs diff=lfs merge=lfs -text
21
+ logs/mute/0_gt_wavs/mute40k.wav filter=lfs diff=lfs merge=lfs -text
22
+ logs/mute/0_gt_wavs/mute48k.wav filter=lfs diff=lfs merge=lfs -text
23
+ logs/mute/1_16k_wavs/mute.wav filter=lfs diff=lfs merge=lfs -text
24
+ logs/mute/2a_f0/mute.wav.npy filter=lfs diff=lfs merge=lfs -text
25
+ logs/mute/2b-f0nsf/mute.wav.npy filter=lfs diff=lfs merge=lfs -text
26
+ logs/mute/3_feature256/mute.npy filter=lfs diff=lfs merge=lfs -text
27
+ logs/mute/3_feature768/mute.npy filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .DS_Store
2
+ __pycache__
3
+ /TEMP
4
+ *.pyd
5
+ .venv
6
+ /opt
7
+ tools/aria2c/
8
+ tools/flag.txt
9
+
10
+ # Imported from huggingface.co/lj1995/VoiceConversionWebUI
11
+ /pretrained
12
+ /pretrained_v2
13
+ /uvr5_weights
14
+ hubert_base.pt
15
+ rmvpe.onnx
16
+ rmvpe.pt
17
+
18
+ # Generated by RVC
19
+ /logs
20
+ /weights
21
+
22
+ # To set a Python version for the project
23
+ .tool-versions
24
+
25
+ /runtime
26
+ /assets/weights/*
27
+ ffmpeg.*
28
+ ffprobe.*
CONTRIBUTING.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 贡献规则
2
+ 1. 一般来说,作者`@RVC-Boss`将拒绝所有的算法更改,除非它是为了修复某个代码层面的错误或警告
3
+ 2. 您可以贡献本仓库的其他位置,如翻译和WebUI,但请尽量作最小更改
4
+ 3. 所有更改都需要由`@RVC-Boss`批准,因此您的PR可能会被搁置
5
+ 4. 由此带来的不便请您谅解
6
+
7
+ # Contributing Rules
8
+ 1. Generally, the author `@RVC-Boss` will reject all algorithm changes unless it is to fix a code-level error or warning.
9
+ 2. You can contribute to other parts of this repo like translations and WebUI, but please minimize your changes as much as possible.
10
+ 3. All changes need to be approved by `@RVC-Boss`, so your PR may be put on hold.
11
+ 4. Please accept our apologies for any inconvenience caused.
Dockerfile ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # syntax=docker/dockerfile:1
2
+
3
+ FROM nvidia/cuda:11.6.2-cudnn8-runtime-ubuntu20.04
4
+
5
+ EXPOSE 7865
6
+
7
+ WORKDIR /app
8
+
9
+ # Install dependencies to add PPAs
10
+ RUN apt-get update && \
11
+ apt-get install -y -qq ffmpeg aria2 && apt clean && \
12
+ apt-get install -y software-properties-common && \
13
+ apt-get clean && \
14
+ rm -rf /var/lib/apt/lists/*
15
+ # Add the deadsnakes PPA to get Python 3.9
16
+ RUN add-apt-repository ppa:deadsnakes/ppa
17
+
18
+ # Install Python 3.9 and pip
19
+ RUN apt-get update && \
20
+ apt-get install -y build-essential python-dev python3-dev python3.9-distutils python3.9-dev python3.9 curl && \
21
+ apt-get clean && \
22
+ update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 && \
23
+ curl https://bootstrap.pypa.io/get-pip.py | python3.9
24
+
25
+ # Set Python 3.9 as the default
26
+ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.9 1
27
+
28
+ COPY . .
29
+
30
+ RUN python3 -m pip install --no-cache-dir -r requirements.txt
31
+
32
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d assets/pretrained_v2/ -o D40k.pth
33
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d assets/pretrained_v2/ -o G40k.pth
34
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d assets/pretrained_v2/ -o f0D40k.pth
35
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d assets/pretrained_v2/ -o f0G40k.pth
36
+
37
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d assets/uvr5_weights/ -o HP2-人声vocals+非人声instrumentals.pth
38
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d assets/uvr5_weights/ -o HP5-主旋律人声vocals+其他instrumentals.pth
39
+
40
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d assets/hubert -o hubert_base.pt
41
+
42
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d assets/rmvpe -o rmvpe.pt
43
+
44
+ VOLUME [ "/app/weights", "/app/opt" ]
45
+
46
+ CMD ["python3", "infer-web.py"]
Dwayne_cut_2.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48496f3828fe421c161576358d0e2fe07c40e0cea48676b67a0c1e431e244b27
3
+ size 644310
LICENSE ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023-2024 liujing04
4
+ Copyright (c) 2023-2024 fumiama
5
+ Copyright (c) 2023-2024 Ftps
6
+
7
+ Permission is hereby granted, free of charge, to any person obtaining a copy
8
+ of this software and associated documentation files (the "Software"), to deal
9
+ in the Software without restriction, including without limitation the rights
10
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
+ copies of the Software, and to permit persons to whom the Software is
12
+ furnished to do so, subject to the following conditions:
13
+
14
+ The above copyright notice and this permission notice shall be included in all
15
+ copies or substantial portions of the Software.
16
+
17
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
+ SOFTWARE.
MIT协议暨相关引用库协议 ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 本软件及其相关代码以MIT协议开源,作者不对软件具备任何控制力,使用软件者、传播软件导出的声音者自负全责。
2
+ 如不认可该条款,则不能使用或引用软件包内任何代码和文件。
3
+
4
+ 特此授予任何获得本软件和相关文档文件(以下简称“软件”)副本的人免费使用、复制、修改、合并、出版、分发、再授权和/或销售本软件的权利,以及授予本软件所提供的人使用本软件的权利,但须符合以下条件:
5
+ 上述版权声明和本许可声明应包含在软件的所有副本或实质部分中。
6
+ 软件是“按原样”提供的,没有任何明示或暗示的保证,包括但不限于适销性、适用于特定目的和不侵权的保证。在任何情况下,作者或版权持有人均不承担因软件或软件的使用或其他交易而产生、产生或与之相关的任何索赔、损害赔偿或其他责任,无论是在合同诉讼、侵权诉讼还是其他诉讼中。
7
+
8
+
9
+ The LICENCEs for related libraries are as follows.
10
+ 相关引用库协议如下:
11
+
12
+ ContentVec
13
+ https://github.com/auspicious3000/contentvec/blob/main/LICENSE
14
+ MIT License
15
+
16
+ VITS
17
+ https://github.com/jaywalnut310/vits/blob/main/LICENSE
18
+ MIT License
19
+
20
+ HIFIGAN
21
+ https://github.com/jik876/hifi-gan/blob/master/LICENSE
22
+ MIT License
23
+
24
+ gradio
25
+ https://github.com/gradio-app/gradio/blob/main/LICENSE
26
+ Apache License 2.0
27
+
28
+ ffmpeg
29
+ https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
30
+ https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2021-02-28-12-32/ffmpeg-n4.3.2-160-gfbb9368226-win64-lgpl-4.3.zip
31
+ LGPLv3 License
32
+ MIT License
33
+
34
+ ultimatevocalremovergui
35
+ https://github.com/Anjok07/ultimatevocalremovergui/blob/master/LICENSE
36
+ https://github.com/yang123qwe/vocal_separation_by_uvr5
37
+ MIT License
38
+
39
+ audio-slicer
40
+ https://github.com/openvpi/audio-slicer/blob/main/LICENSE
41
+ MIT License
42
+
43
+ FreeSimpleGUI
44
+ https://github.com/spyoungtech/FreeSimpleGUI/blob/master/license.txt
45
+ LGPLv3 License
README.md ADDED
@@ -0,0 +1 @@
 
 
1
+ RVC
__pycache__/easy_sync.cpython-311.pyc ADDED
Binary file (9.89 kB). View file
 
a.png ADDED

Git LFS Details

  • SHA256: 2c8e539ca47a7597f46f4c6e1e46512399043fc1feb00f5686a2ab09f072db73
  • Pointer size: 129 Bytes
  • Size of remote file: 2.35 kB
assets/Synthesizer_inputs.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c5ae8cd034b02bbc325939e9b9debbedb43ee9d71a654daaff8804815bd957d
3
+ size 122495
assets/hubert/.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ *
2
+ !.gitignore
3
+ !hubert_inputs.pth
assets/hubert/hubert_inputs.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bbd4741d4be8a71333170c0df5320f605a9d210b96547b391555da078167861f
3
+ size 169434
assets/indices/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ *
2
+ !.gitignore
assets/pretrained/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ *
2
+ !.gitignore
assets/pretrained_v2/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ *
2
+ !.gitignore
assets/pretrained_v2/f0D32k.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd7134e7793674c85474d5145d2d982e3c5d8124fc7bb6c20f710ed65808fa8a
3
+ size 142875703
assets/pretrained_v2/f0G32k.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2332611297b8d88c7436de8f17ef5f07a2119353e962cd93cda5806d59a1133d
3
+ size 73950049
assets/pretrained_v2/f0Ov2Super32kD.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2401113af8524bc5c0fe2221a81997b32f85db782f2271bb21d268f2fbf15c56
3
+ size 857123266
assets/pretrained_v2/f0Ov2Super32kG.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4af1279fb8fd15af9eacbb41687fc695e74009e9dd0edc634b6296453324db4
3
+ size 443230526
assets/rmvpe/.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ *
2
+ !.gitignore
3
+ !rmvpe_inputs.pth
assets/rmvpe/rmvpe_inputs.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:339fcb7e1476b302e9aecef4a951e918c20852b2e871de5eea13b06e554e0a3a
3
+ size 33527
assets/uvr5_weights/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ *
2
+ !.gitignore
audio.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53757d5986f47d8bd8726c72b22d5d4ab17e3e61082a0235f5c4d8543e76107c
3
+ size 182637
audios/astronauts.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:638fcd17801db878209c1b5b5b5c0e73a9da842e090d9077fca534c1dd7b6d34
3
+ size 73560
audios/output_audio.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7fb256559e80671ba9e11cedde5bcfeae33f71774533e18e753134a06625320a
3
+ size 481324
audios/somegirl.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:955cc2aa443c588a8eb3d3fbb30bfd31b7503071981076f38c89110d5be9fc28
3
+ size 32182
audios/someguy.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a30d055d4e0f5f501edc59c66d84fc8b57a052f9906e94041474ee5ad8c5a2e0
3
+ size 24868
audios/unachica.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:131cc78d8cb23cac783bbd0485667dfd59192a0006297d6ec981f3d075dee74e
3
+ size 36362
audios/unchico.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:901d92e7726be546da88416b7e0af251c25a93f5c17c4b0ebb0a70d99f909dfe
3
+ size 35944
configs/__pycache__/config.cpython-311.pyc ADDED
Binary file (11.4 kB). View file
 
configs/config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"pth_path": "assets/weights/kikiV1.pth", "index_path": "logs/kikiV1.index", "sg_hostapi": "MME", "sg_wasapi_exclusive": false, "sg_input_device": "VoiceMeeter Output (VB-Audio Vo", "sg_output_device": "VoiceMeeter Input (VB-Audio Voi", "sr_type": "sr_device", "threhold": -60.0, "pitch": 12.0, "rms_mix_rate": 0.5, "index_rate": 0.0, "block_time": 0.15, "crossfade_length": 0.08, "extra_time": 2.0, "n_cpu": 4.0, "use_jit": false, "use_pv": false, "f0method": "fcpe"}
configs/config.py ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import sys
4
+ import json
5
+ import shutil
6
+ from multiprocessing import cpu_count
7
+
8
+ import torch
9
+
10
+ try:
11
+ import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
12
+
13
+ if torch.xpu.is_available():
14
+ from infer.modules.ipex import ipex_init
15
+
16
+ ipex_init()
17
+ except Exception: # pylint: disable=broad-exception-caught
18
+ pass
19
+ import logging
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
+ version_config_list = [
25
+ "v1/32k.json",
26
+ "v1/40k.json",
27
+ "v1/48k.json",
28
+ "v2/48k.json",
29
+ "v2/32k.json",
30
+ ]
31
+
32
+
33
+ def singleton_variable(func):
34
+ def wrapper(*args, **kwargs):
35
+ if not wrapper.instance:
36
+ wrapper.instance = func(*args, **kwargs)
37
+ return wrapper.instance
38
+
39
+ wrapper.instance = None
40
+ return wrapper
41
+
42
+
43
+ @singleton_variable
44
+ class Config:
45
+ def __init__(self):
46
+ self.device = "cuda:0"
47
+ self.is_half = True
48
+ self.use_jit = False
49
+ self.n_cpu = 0
50
+ self.gpu_name = None
51
+ self.json_config = self.load_config_json()
52
+ self.gpu_mem = None
53
+ (
54
+ self.python_cmd,
55
+ self.listen_port,
56
+ self.iscolab,
57
+ self.noparallel,
58
+ self.noautoopen,
59
+ self.dml,
60
+ self.nocheck,
61
+ ) = self.arg_parse()
62
+ self.instead = ""
63
+ self.preprocess_per = 3.7
64
+ self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
65
+
66
+ @staticmethod
67
+ def load_config_json() -> dict:
68
+ d = {}
69
+ for config_file in version_config_list:
70
+ p = f"configs/inuse/{config_file}"
71
+ if not os.path.exists(p):
72
+ shutil.copy(f"configs/{config_file}", p)
73
+ with open(f"configs/inuse/{config_file}", "r") as f:
74
+ d[config_file] = json.load(f)
75
+ return d
76
+
77
+ @staticmethod
78
+ def arg_parse() -> tuple:
79
+ exe = sys.executable or "python"
80
+ parser = argparse.ArgumentParser()
81
+ parser.add_argument("--port", type=int, default=7865, help="Listen port")
82
+ parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
83
+ parser.add_argument("--colab", action="store_true", help="Launch in colab")
84
+ parser.add_argument(
85
+ "--noparallel", action="store_true", help="Disable parallel processing"
86
+ )
87
+ parser.add_argument(
88
+ "--noautoopen",
89
+ action="store_true",
90
+ help="Do not open in browser automatically",
91
+ )
92
+ parser.add_argument(
93
+ "--dml",
94
+ action="store_true",
95
+ help="torch_dml",
96
+ )
97
+ parser.add_argument(
98
+ "--nocheck", action="store_true", help="Run without checking assets"
99
+ )
100
+ cmd_opts = parser.parse_args()
101
+
102
+ cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
103
+
104
+ return (
105
+ cmd_opts.pycmd,
106
+ cmd_opts.port,
107
+ cmd_opts.colab,
108
+ cmd_opts.noparallel,
109
+ cmd_opts.noautoopen,
110
+ cmd_opts.dml,
111
+ cmd_opts.nocheck,
112
+ )
113
+
114
+ # has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
115
+ # check `getattr` and try it for compatibility
116
+ @staticmethod
117
+ def has_mps() -> bool:
118
+ if not torch.backends.mps.is_available():
119
+ return False
120
+ try:
121
+ torch.zeros(1).to(torch.device("mps"))
122
+ return True
123
+ except Exception:
124
+ return False
125
+
126
+ @staticmethod
127
+ def has_xpu() -> bool:
128
+ if hasattr(torch, "xpu") and torch.xpu.is_available():
129
+ return True
130
+ else:
131
+ return False
132
+
133
+ def use_fp32_config(self):
134
+ for config_file in version_config_list:
135
+ self.json_config[config_file]["train"]["fp16_run"] = False
136
+ with open(f"configs/inuse/{config_file}", "r") as f:
137
+ strr = f.read().replace("true", "false")
138
+ with open(f"configs/inuse/{config_file}", "w") as f:
139
+ f.write(strr)
140
+ logger.info("overwrite " + config_file)
141
+ self.preprocess_per = 3.0
142
+ logger.info("overwrite preprocess_per to %d" % (self.preprocess_per))
143
+
144
+ def device_config(self) -> tuple:
145
+ if torch.cuda.is_available():
146
+ if self.has_xpu():
147
+ self.device = self.instead = "xpu:0"
148
+ self.is_half = True
149
+ i_device = int(self.device.split(":")[-1])
150
+ self.gpu_name = torch.cuda.get_device_name(i_device)
151
+ if (
152
+ ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
153
+ or "P40" in self.gpu_name.upper()
154
+ or "P10" in self.gpu_name.upper()
155
+ or "1060" in self.gpu_name
156
+ or "1070" in self.gpu_name
157
+ or "1080" in self.gpu_name
158
+ ):
159
+ logger.info("Found GPU %s, force to fp32", self.gpu_name)
160
+ self.is_half = False
161
+ self.use_fp32_config()
162
+ else:
163
+ logger.info("Found GPU %s", self.gpu_name)
164
+ self.gpu_mem = int(
165
+ torch.cuda.get_device_properties(i_device).total_memory
166
+ / 1024
167
+ / 1024
168
+ / 1024
169
+ + 0.4
170
+ )
171
+ if self.gpu_mem <= 4:
172
+ self.preprocess_per = 3.0
173
+ elif self.has_mps():
174
+ logger.info("No supported Nvidia GPU found")
175
+ self.device = self.instead = "mps"
176
+ self.is_half = False
177
+ self.use_fp32_config()
178
+ else:
179
+ logger.info("No supported Nvidia GPU found")
180
+ self.device = self.instead = "cpu"
181
+ self.is_half = False
182
+ self.use_fp32_config()
183
+
184
+ if self.n_cpu == 0:
185
+ self.n_cpu = cpu_count()
186
+
187
+ if self.is_half:
188
+ # 6G显存配置
189
+ x_pad = 3
190
+ x_query = 10
191
+ x_center = 60
192
+ x_max = 65
193
+ else:
194
+ # 5G显存配置
195
+ x_pad = 1
196
+ x_query = 6
197
+ x_center = 38
198
+ x_max = 41
199
+
200
+ if self.gpu_mem is not None and self.gpu_mem <= 4:
201
+ x_pad = 1
202
+ x_query = 5
203
+ x_center = 30
204
+ x_max = 32
205
+ if self.dml:
206
+ logger.info("Use DirectML instead")
207
+ if (
208
+ os.path.exists(
209
+ "runtime\Lib\site-packages\onnxruntime\capi\DirectML.dll"
210
+ )
211
+ == False
212
+ ):
213
+ try:
214
+ os.rename(
215
+ "runtime\Lib\site-packages\onnxruntime",
216
+ "runtime\Lib\site-packages\onnxruntime-cuda",
217
+ )
218
+ except:
219
+ pass
220
+ try:
221
+ os.rename(
222
+ "runtime\Lib\site-packages\onnxruntime-dml",
223
+ "runtime\Lib\site-packages\onnxruntime",
224
+ )
225
+ except:
226
+ pass
227
+ # if self.device != "cpu":
228
+ import torch_directml
229
+
230
+ self.device = torch_directml.device(torch_directml.default_device())
231
+ self.is_half = False
232
+ else:
233
+ if self.instead:
234
+ logger.info(f"Use {self.instead} instead")
235
+ if (
236
+ os.path.exists(
237
+ "runtime\Lib\site-packages\onnxruntime\capi\onnxruntime_providers_cuda.dll"
238
+ )
239
+ == False
240
+ ):
241
+ try:
242
+ os.rename(
243
+ "runtime\Lib\site-packages\onnxruntime",
244
+ "runtime\Lib\site-packages\onnxruntime-dml",
245
+ )
246
+ except:
247
+ pass
248
+ try:
249
+ os.rename(
250
+ "runtime\Lib\site-packages\onnxruntime-cuda",
251
+ "runtime\Lib\site-packages\onnxruntime",
252
+ )
253
+ except:
254
+ pass
255
+ logger.info(
256
+ "Half-precision floating-point: %s, device: %s"
257
+ % (self.is_half, self.device)
258
+ )
259
+ return x_pad, x_query, x_center, x_max
configs/inuse/.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ *
2
+ !.gitignore
3
+ !v1
4
+ !v2
configs/inuse/v1/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ *
2
+ !.gitignore
configs/inuse/v1/32k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 12800,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 32000,
21
+ "filter_length": 1024,
22
+ "hop_length": 320,
23
+ "win_length": 1024,
24
+ "n_mel_channels": 80,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [10,4,2,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [16,16,4,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
configs/inuse/v1/40k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 12800,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 40000,
21
+ "filter_length": 2048,
22
+ "hop_length": 400,
23
+ "win_length": 2048,
24
+ "n_mel_channels": 125,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [10,10,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [16,16,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
configs/inuse/v1/48k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 11520,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 48000,
21
+ "filter_length": 2048,
22
+ "hop_length": 480,
23
+ "win_length": 2048,
24
+ "n_mel_channels": 128,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [10,6,2,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [16,16,4,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
configs/inuse/v2/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ *
2
+ !.gitignore
configs/inuse/v2/32k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 12800,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 32000,
21
+ "filter_length": 1024,
22
+ "hop_length": 320,
23
+ "win_length": 1024,
24
+ "n_mel_channels": 80,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [10,8,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [20,16,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
configs/inuse/v2/48k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 17280,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 48000,
21
+ "filter_length": 2048,
22
+ "hop_length": 480,
23
+ "win_length": 2048,
24
+ "n_mel_channels": 128,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [12,10,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [24,20,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
configs/v1/32k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 12800,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 32000,
21
+ "filter_length": 1024,
22
+ "hop_length": 320,
23
+ "win_length": 1024,
24
+ "n_mel_channels": 80,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [10,4,2,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [16,16,4,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
configs/v1/40k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 12800,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 40000,
21
+ "filter_length": 2048,
22
+ "hop_length": 400,
23
+ "win_length": 2048,
24
+ "n_mel_channels": 125,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [10,10,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [16,16,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
configs/v1/48k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 11520,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 48000,
21
+ "filter_length": 2048,
22
+ "hop_length": 480,
23
+ "win_length": 2048,
24
+ "n_mel_channels": 128,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [10,6,2,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [16,16,4,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
configs/v2/32k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 12800,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 32000,
21
+ "filter_length": 1024,
22
+ "hop_length": 320,
23
+ "win_length": 1024,
24
+ "n_mel_channels": 80,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [10,8,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [20,16,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
configs/v2/48k.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "train": {
3
+ "log_interval": 200,
4
+ "seed": 1234,
5
+ "epochs": 20000,
6
+ "learning_rate": 1e-4,
7
+ "betas": [0.8, 0.99],
8
+ "eps": 1e-9,
9
+ "batch_size": 4,
10
+ "fp16_run": true,
11
+ "lr_decay": 0.999875,
12
+ "segment_size": 17280,
13
+ "init_lr_ratio": 1,
14
+ "warmup_epochs": 0,
15
+ "c_mel": 45,
16
+ "c_kl": 1.0
17
+ },
18
+ "data": {
19
+ "max_wav_value": 32768.0,
20
+ "sampling_rate": 48000,
21
+ "filter_length": 2048,
22
+ "hop_length": 480,
23
+ "win_length": 2048,
24
+ "n_mel_channels": 128,
25
+ "mel_fmin": 0.0,
26
+ "mel_fmax": null
27
+ },
28
+ "model": {
29
+ "inter_channels": 192,
30
+ "hidden_channels": 192,
31
+ "filter_channels": 768,
32
+ "n_heads": 2,
33
+ "n_layers": 6,
34
+ "kernel_size": 3,
35
+ "p_dropout": 0,
36
+ "resblock": "1",
37
+ "resblock_kernel_sizes": [3,7,11],
38
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
39
+ "upsample_rates": [12,10,2,2],
40
+ "upsample_initial_channel": 512,
41
+ "upsample_kernel_sizes": [24,20,4,4],
42
+ "use_spectral_norm": false,
43
+ "gin_channels": 256,
44
+ "spk_embed_dim": 109
45
+ }
46
+ }
demo.py ADDED
@@ -0,0 +1,439 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from original import *
2
+ import shutil, glob
3
+ from easyfuncs import download_from_url, CachedModels
4
+ os.makedirs("dataset",exist_ok=True)
5
+ model_library = CachedModels()
6
+
7
+ with gr.Blocks(title="🔊",theme=gr.themes.Base(primary_hue="rose",neutral_hue="zinc")) as app:
8
+ with gr.Row():
9
+ gr.HTML("<img src='file/a.png' alt='image'>")
10
+ with gr.Tabs():
11
+ with gr.TabItem("Inference"):
12
+ with gr.Row():
13
+ voice_model = gr.Dropdown(label="Model Voice", choices=sorted(names), value=lambda:sorted(names)[0] if len(sorted(names)) > 0 else '', interactive=True)
14
+ refresh_button = gr.Button("Refresh", variant="primary")
15
+ spk_item = gr.Slider(
16
+ minimum=0,
17
+ maximum=2333,
18
+ step=1,
19
+ label="Speaker ID",
20
+ value=0,
21
+ visible=False,
22
+ interactive=True,
23
+ )
24
+ vc_transform0 = gr.Number(
25
+ label="Pitch",
26
+ value=0
27
+ )
28
+ but0 = gr.Button(value="Convert", variant="primary")
29
+ with gr.Row():
30
+ with gr.Column():
31
+ with gr.Row():
32
+ dropbox = gr.File(label="Drop your audio here & hit the Reload button.")
33
+ with gr.Row():
34
+ record_button=gr.Audio(source="microphone", label="OR Record audio.", type="filepath")
35
+ with gr.Row():
36
+ paths_for_files = lambda path:[os.path.abspath(os.path.join(path, f)) for f in os.listdir(path) if os.path.splitext(f)[1].lower() in ('.mp3', '.wav', '.flac', '.ogg')]
37
+ input_audio0 = gr.Dropdown(
38
+ label="Input Path",
39
+ value=paths_for_files('audios')[0] if len(paths_for_files('audios')) > 0 else '',
40
+ choices=paths_for_files('audios'), # Only show absolute paths for audio files ending in .mp3, .wav, .flac or .ogg
41
+ allow_custom_value=True
42
+ )
43
+ with gr.Row():
44
+ audio_player = gr.Audio()
45
+ input_audio0.change(
46
+ inputs=[input_audio0],
47
+ outputs=[audio_player],
48
+ fn=lambda path: {"value":path,"__type__":"update"} if os.path.exists(path) else None
49
+ )
50
+ record_button.stop_recording(
51
+ fn=lambda audio:audio, #TODO save wav lambda
52
+ inputs=[record_button],
53
+ outputs=[input_audio0])
54
+ dropbox.upload(
55
+ fn=lambda audio:audio.name,
56
+ inputs=[dropbox],
57
+ outputs=[input_audio0])
58
+ with gr.Column():
59
+ with gr.Accordion("Change Index", open=False):
60
+ file_index2 = gr.Dropdown(
61
+ label="Change Index",
62
+ choices=sorted(index_paths),
63
+ interactive=True,
64
+ value=sorted(index_paths)[0] if len(sorted(index_paths)) > 0 else ''
65
+ )
66
+ index_rate1 = gr.Slider(
67
+ minimum=0,
68
+ maximum=1,
69
+ label="Index Strength",
70
+ value=0.5,
71
+ interactive=True,
72
+ )
73
+ vc_output2 = gr.Audio(label="Output")
74
+ with gr.Accordion("General Settings", open=False):
75
+ f0method0 = gr.Radio(
76
+ label="Method",
77
+ choices=["pm", "harvest", "crepe", "rmvpe"]
78
+ if config.dml == False
79
+ else ["pm", "harvest", "rmvpe"],
80
+ value="rmvpe",
81
+ interactive=True,
82
+ )
83
+ filter_radius0 = gr.Slider(
84
+ minimum=0,
85
+ maximum=7,
86
+ label="Breathiness Reduction (Harvest only)",
87
+ value=3,
88
+ step=1,
89
+ interactive=True,
90
+ )
91
+ resample_sr0 = gr.Slider(
92
+ minimum=0,
93
+ maximum=48000,
94
+ label="Resample",
95
+ value=0,
96
+ step=1,
97
+ interactive=True,
98
+ visible=False
99
+ )
100
+ rms_mix_rate0 = gr.Slider(
101
+ minimum=0,
102
+ maximum=1,
103
+ label="Volume Normalization",
104
+ value=0,
105
+ interactive=True,
106
+ )
107
+ protect0 = gr.Slider(
108
+ minimum=0,
109
+ maximum=0.5,
110
+ label="Breathiness Protection (0 is enabled, 0.5 is disabled)",
111
+ value=0.33,
112
+ step=0.01,
113
+ interactive=True,
114
+ )
115
+ if voice_model != None: vc.get_vc(voice_model.value,protect0,protect0)
116
+ file_index1 = gr.Textbox(
117
+ label="Index Path",
118
+ interactive=True,
119
+ visible=False#Not used here
120
+ )
121
+ refresh_button.click(
122
+ fn=change_choices,
123
+ inputs=[],
124
+ outputs=[voice_model, file_index2],
125
+ api_name="infer_refresh",
126
+ )
127
+ refresh_button.click(
128
+ fn=lambda:{"choices":paths_for_files('audios'),"__type__":"update"}, #TODO check if properly returns a sorted list of audio files in the 'audios' folder that have the extensions '.wav', '.mp3', '.ogg', or '.flac'
129
+ inputs=[],
130
+ outputs = [input_audio0],
131
+ )
132
+ refresh_button.click(
133
+ fn=lambda:{"value":paths_for_files('audios')[0],"__type__":"update"} if len(paths_for_files('audios')) > 0 else {"value":"","__type__":"update"}, #TODO check if properly returns a sorted list of audio files in the 'audios' folder that have the extensions '.wav', '.mp3', '.ogg', or '.flac'
134
+ inputs=[],
135
+ outputs = [input_audio0],
136
+ )
137
+ with gr.Row():
138
+ f0_file = gr.File(label="F0 Path", visible=False)
139
+ with gr.Row():
140
+ vc_output1 = gr.Textbox(label="Information", placeholder="Welcome!",visible=False)
141
+ but0.click(
142
+ vc.vc_single,
143
+ [
144
+ spk_item,
145
+ input_audio0,
146
+ vc_transform0,
147
+ f0_file,
148
+ f0method0,
149
+ file_index1,
150
+ file_index2,
151
+ index_rate1,
152
+ filter_radius0,
153
+ resample_sr0,
154
+ rms_mix_rate0,
155
+ protect0,
156
+ ],
157
+ [vc_output1, vc_output2],
158
+ api_name="infer_convert",
159
+ )
160
+ voice_model.change(
161
+ fn=vc.get_vc,
162
+ inputs=[voice_model, protect0, protect0],
163
+ outputs=[spk_item, protect0, protect0, file_index2, file_index2],
164
+ api_name="infer_change_voice",
165
+ )
166
+ with gr.TabItem("Download Models"):
167
+ with gr.Row():
168
+ url_input = gr.Textbox(label="URL to model", value="",placeholder="https://...", scale=6)
169
+ name_output = gr.Textbox(label="Save as", value="",placeholder="MyModel",scale=2)
170
+ url_download = gr.Button(value="Download Model",scale=2)
171
+ url_download.click(
172
+ inputs=[url_input,name_output],
173
+ outputs=[url_input],
174
+ fn=download_from_url,
175
+ )
176
+ with gr.Row():
177
+ model_browser = gr.Dropdown(choices=list(model_library.models.keys()),label="OR Search Models (Quality UNKNOWN)",scale=5)
178
+ download_from_browser = gr.Button(value="Get",scale=2)
179
+ download_from_browser.click(
180
+ inputs=[model_browser],
181
+ outputs=[model_browser],
182
+ fn=lambda model: download_from_url(model_library.models[model],model),
183
+ )
184
+ with gr.TabItem("Train"):
185
+ with gr.Row():
186
+ with gr.Column():
187
+ training_name = gr.Textbox(label="Name your model", value="My-Voice",placeholder="My-Voice")
188
+ np7 = gr.Slider(
189
+ minimum=0,
190
+ maximum=config.n_cpu,
191
+ step=1,
192
+ label="Number of CPU processes used to extract pitch features",
193
+ value=int(np.ceil(config.n_cpu / 1.5)),
194
+ interactive=True,
195
+ )
196
+ sr2 = gr.Radio(
197
+ label="Sampling Rate",
198
+ choices=["40k", "32k"],
199
+ value="32k",
200
+ interactive=True,
201
+ visible=False
202
+ )
203
+ if_f0_3 = gr.Radio(
204
+ label="Will your model be used for singing? If not, you can ignore this.",
205
+ choices=[True, False],
206
+ value=True,
207
+ interactive=True,
208
+ visible=False
209
+ )
210
+ version19 = gr.Radio(
211
+ label="Version",
212
+ choices=["v1", "v2"],
213
+ value="v2",
214
+ interactive=True,
215
+ visible=False,
216
+ )
217
+ dataset_folder = gr.Textbox(
218
+ label="dataset folder", value='dataset'
219
+ )
220
+ easy_uploader = gr.Files(label="Drop your audio files here",file_types=['audio'])
221
+ but1 = gr.Button("1. Process", variant="primary")
222
+ info1 = gr.Textbox(label="Information", value="",visible=True)
223
+ easy_uploader.upload(inputs=[dataset_folder],outputs=[],fn=lambda folder:os.makedirs(folder,exist_ok=True))
224
+ easy_uploader.upload(
225
+ fn=lambda files,folder: [shutil.copy2(f.name,os.path.join(folder,os.path.split(f.name)[1])) for f in files] if folder != "" else gr.Warning('Please enter a folder name for your dataset'),
226
+ inputs=[easy_uploader, dataset_folder],
227
+ outputs=[])
228
+ gpus6 = gr.Textbox(
229
+ label="Enter the GPU numbers to use separated by -, (e.g. 0-1-2)",
230
+ value=gpus,
231
+ interactive=True,
232
+ visible=F0GPUVisible,
233
+ )
234
+ gpu_info9 = gr.Textbox(
235
+ label="GPU Info", value=gpu_info, visible=F0GPUVisible
236
+ )
237
+ spk_id5 = gr.Slider(
238
+ minimum=0,
239
+ maximum=4,
240
+ step=1,
241
+ label="Speaker ID",
242
+ value=0,
243
+ interactive=True,
244
+ visible=False
245
+ )
246
+ but1.click(
247
+ preprocess_dataset,
248
+ [dataset_folder, training_name, sr2, np7],
249
+ [info1],
250
+ api_name="train_preprocess",
251
+ )
252
+ with gr.Column():
253
+ f0method8 = gr.Radio(
254
+ label="F0 extraction method",
255
+ choices=["pm", "harvest", "dio", "rmvpe", "rmvpe_gpu"],
256
+ value="rmvpe_gpu",
257
+ interactive=True,
258
+ )
259
+ gpus_rmvpe = gr.Textbox(
260
+ label="GPU numbers to use separated by -, (e.g. 0-1-2)",
261
+ value="%s-%s" % (gpus, gpus),
262
+ interactive=True,
263
+ visible=F0GPUVisible,
264
+ )
265
+ but2 = gr.Button("2. Extract Features", variant="primary")
266
+ info2 = gr.Textbox(label="Information", value="", max_lines=8)
267
+ f0method8.change(
268
+ fn=change_f0_method,
269
+ inputs=[f0method8],
270
+ outputs=[gpus_rmvpe],
271
+ )
272
+ but2.click(
273
+ extract_f0_feature,
274
+ [
275
+ gpus6,
276
+ np7,
277
+ f0method8,
278
+ if_f0_3,
279
+ training_name,
280
+ version19,
281
+ gpus_rmvpe,
282
+ ],
283
+ [info2],
284
+ api_name="train_extract_f0_feature",
285
+ )
286
+ with gr.Column():
287
+ total_epoch11 = gr.Slider(
288
+ minimum=2,
289
+ maximum=1000,
290
+ step=1,
291
+ label="Epochs (more epochs may improve quality but takes longer)",
292
+ value=150,
293
+ interactive=True,
294
+ )
295
+ but4 = gr.Button("3. Train Index", variant="primary")
296
+ but3 = gr.Button("4. Train Model", variant="primary")
297
+ info3 = gr.Textbox(label="Information", value="", max_lines=10)
298
+ with gr.Accordion(label="General Settings", open=False):
299
+ gpus16 = gr.Textbox(
300
+ label="GPUs separated by -, (e.g. 0-1-2)",
301
+ value="0",
302
+ interactive=True,
303
+ visible=True
304
+ )
305
+ save_epoch10 = gr.Slider(
306
+ minimum=1,
307
+ maximum=50,
308
+ step=1,
309
+ label="Weight Saving Frequency",
310
+ value=25,
311
+ interactive=True,
312
+ )
313
+ batch_size12 = gr.Slider(
314
+ minimum=1,
315
+ maximum=40,
316
+ step=1,
317
+ label="Batch Size",
318
+ value=default_batch_size,
319
+ interactive=True,
320
+ )
321
+ if_save_latest13 = gr.Radio(
322
+ label="Only save the latest model",
323
+ choices=["yes", "no"],
324
+ value="yes",
325
+ interactive=True,
326
+ visible=False
327
+ )
328
+ if_cache_gpu17 = gr.Radio(
329
+ label="If your dataset is UNDER 10 minutes, cache it to train faster",
330
+ choices=["yes", "no"],
331
+ value="no",
332
+ interactive=True,
333
+ )
334
+ if_save_every_weights18 = gr.Radio(
335
+ label="Save small model at every save point",
336
+ choices=["yes", "no"],
337
+ value="yes",
338
+ interactive=True,
339
+ )
340
+ with gr.Accordion(label="Change pretrains", open=False):
341
+ pretrained = lambda sr, letter: [os.path.abspath(os.path.join('assets/pretrained_v2', file)) for file in os.listdir('assets/pretrained_v2') if file.endswith('.pth') and sr in file and letter in file]
342
+ pretrained_G14 = gr.Dropdown(
343
+ label="pretrained G",
344
+ # Get a list of all pretrained G model files in assets/pretrained_v2 that end with .pth
345
+ choices = pretrained(sr2.value, 'G'),
346
+ value=pretrained(sr2.value, 'G')[0] if len(pretrained(sr2.value, 'G')) > 0 else '',
347
+ interactive=True,
348
+ visible=True
349
+ )
350
+ pretrained_D15 = gr.Dropdown(
351
+ label="pretrained D",
352
+ choices = pretrained(sr2.value, 'D'),
353
+ value= pretrained(sr2.value, 'D')[0] if len(pretrained(sr2.value, 'G')) > 0 else '',
354
+ visible=True,
355
+ interactive=True
356
+ )
357
+ with gr.Row():
358
+ download_model = gr.Button('5.Download Model')
359
+ with gr.Row():
360
+ model_files = gr.Files(label='Your Model and Index file can be downloaded here:')
361
+ download_model.click(
362
+ fn=lambda name: os.listdir(f'assets/weights/{name}') + glob.glob(f'logs/{name.split(".")[0]}/added_*.index'),
363
+ inputs=[training_name],
364
+ outputs=[model_files, info3])
365
+ with gr.Row():
366
+ sr2.change(
367
+ change_sr2,
368
+ [sr2, if_f0_3, version19],
369
+ [pretrained_G14, pretrained_D15],
370
+ )
371
+ version19.change(
372
+ change_version19,
373
+ [sr2, if_f0_3, version19],
374
+ [pretrained_G14, pretrained_D15, sr2],
375
+ )
376
+ if_f0_3.change(
377
+ change_f0,
378
+ [if_f0_3, sr2, version19],
379
+ [f0method8, pretrained_G14, pretrained_D15],
380
+ )
381
+ with gr.Row():
382
+ but5 = gr.Button("1 Click Training", variant="primary", visible=False)
383
+ but3.click(
384
+ click_train,
385
+ [
386
+ training_name,
387
+ sr2,
388
+ if_f0_3,
389
+ spk_id5,
390
+ save_epoch10,
391
+ total_epoch11,
392
+ batch_size12,
393
+ if_save_latest13,
394
+ pretrained_G14,
395
+ pretrained_D15,
396
+ gpus16,
397
+ if_cache_gpu17,
398
+ if_save_every_weights18,
399
+ version19,
400
+ ],
401
+ info3,
402
+ api_name="train_start",
403
+ )
404
+ but4.click(train_index, [training_name, version19], info3)
405
+ but5.click(
406
+ train1key,
407
+ [
408
+ training_name,
409
+ sr2,
410
+ if_f0_3,
411
+ dataset_folder,
412
+ spk_id5,
413
+ np7,
414
+ f0method8,
415
+ save_epoch10,
416
+ total_epoch11,
417
+ batch_size12,
418
+ if_save_latest13,
419
+ pretrained_G14,
420
+ pretrained_D15,
421
+ gpus16,
422
+ if_cache_gpu17,
423
+ if_save_every_weights18,
424
+ version19,
425
+ gpus_rmvpe,
426
+ ],
427
+ info3,
428
+ api_name="train_start_all",
429
+ )
430
+
431
+ if config.iscolab:
432
+ app.queue(concurrency_count=511, max_size=1022).launch(share=True)
433
+ else:
434
+ app.queue(concurrency_count=511, max_size=1022).launch(
435
+ server_name="0.0.0.0",
436
+ inbrowser=not config.noautoopen,
437
+ server_port=config.listen_port,
438
+ quiet=True,
439
+ )
docker-compose.yml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: "3.8"
2
+ services:
3
+ rvc:
4
+ build:
5
+ context: .
6
+ dockerfile: Dockerfile
7
+ container_name: rvc
8
+ volumes:
9
+ - ./weights:/app/assets/weights
10
+ - ./opt:/app/opt
11
+ # - ./dataset:/app/dataset # you can use this folder in order to provide your dataset for model training
12
+ ports:
13
+ - 7865:7865
14
+ deploy:
15
+ resources:
16
+ reservations:
17
+ devices:
18
+ - driver: nvidia
19
+ count: 1
20
+ capabilities: [gpu]
docs/cn/Changelog_CN.md ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### 20231006更新
2
+
3
+ 我们制作了一个用于实时变声的界面go-realtime-gui.bat/gui_v1.py(事实上早就存在了),本次更新重点也优化了实时变声的性能。对比0813版:
4
+ - 1、优化界面操作:参数热更新(调整参数不需要中止再启动),懒加载模型(已加载过的模型不需要重新加载),增加响度因子参数(响度向输入音频靠近)
5
+ - 2、优化自带降噪效果与速度
6
+ - 3、大幅优化推理速度
7
+
8
+ 注意输入输出设备应该选择同种类型,例如都选MME类型。
9
+
10
+ 1006版本整体的更新为:
11
+ - 1、继续提升rmvpe音高提取算法效果,对于男低音有更大的提升
12
+ - 2、优化推理界面布局
13
+
14
+ ### 20230813更新
15
+ 1-常规bug修复
16
+ - 保存频率总轮数最低改为1 总轮数最低改为2
17
+ - 修复无pretrain模型训练报错
18
+ - 增加伴奏人声分离完毕清理显存
19
+ - faiss保存路径绝对路径改为相对路径
20
+ - 支持路径包含空格(训练集路径+实验名称均支持,不再会报错)
21
+ - filelist取消强制utf8编码
22
+ - 解决实时变声中开启索引导致的CPU极大占用问题
23
+
24
+ 2-重点更新
25
+ - 训练出当前最强开源人声音高提取模型RMVPE,并用于RVC的训练、离线/实时推理,支持pytorch/onnx/DirectML
26
+ - 通过pytorch-dml支持A卡和I卡的
27
+ (1)实时变声(2)推理(3)人声伴奏分离(4)训练暂未支持,会切换至CPU训练;通过onnx_dml支持rmvpe_gpu的推理
28
+
29
+ ### 20230618更新
30
+ - v2增加32k和48k两个新预训练模型
31
+ - 修复非f0模型推理报错
32
+ - 对于超过一小时的训练集的索引建立环节,自动kmeans缩小特征处理以加速索引训练、加入和查询
33
+ - 附送一个人声转吉他玩具仓库
34
+ - 数据处理剔除异常值切片
35
+ - onnx导出选项卡
36
+
37
+ 失败的实验:
38
+ - ~~特征检索增加时序维度:寄,没啥效果~~
39
+ - ~~特征检索增加PCAR降维可选项:寄,数据大用kmeans缩小数据量,数据小降维操作耗时比省下的匹配耗时还多~~
40
+ - ~~支持onnx推理(附带仅推理的小压缩包):寄,生成nsf还是需要pytorch~~
41
+ - ~~训练时在音高、gender、eq、噪声等方面对输入进行随机增强:寄,没啥效果~~
42
+ - ~~接入小型声码器调研:寄,效果变差~~
43
+
44
+ todolist:
45
+ - ~~训练集音高识别支持crepe:已经被RMVPE取代,不需要~~
46
+ - ~~多进程harvest推理:已经被RMVPE取代,不需要~~
47
+ - ~~crepe的精度支持和RVC-config同步:已经被RMVPE取代,不需要。支持这个还要同步torchcrepe的库,麻烦~~
48
+ - 对接F0编辑器
49
+
50
+
51
+ ### 20230528更新
52
+ - 增加v2的jupyter notebook,韩文changelog,增加一些环境依赖
53
+ - 增加呼吸、清辅音、齿音保护模式
54
+ - 支持crepe-full推理
55
+ - UVR5人声伴奏分离加上3个去延迟模型和MDX-Net去混响模型,增加HP3人声提取模型
56
+ - 索引名称增加版本和实验名称
57
+ - 人声伴奏分离、推理批量导出增加音频导出格式选项
58
+ - 废弃32k模型的训练
59
+
60
+ ### 20230513更新
61
+ - 清除一键包内部老版本runtime内残留的lib.infer_pack和uvr5_pack
62
+ - 修复训练集预处理伪多进程的bug
63
+ - 增加harvest识别音高可选通过中值滤波削弱哑音现象,可调整中值滤波半径
64
+ - 导出音频增加后处理重采样
65
+ - 训练n_cpu进程数从"仅调整f0提取"改为"调整数据预处理和f0提取"
66
+ - 自动检测logs文件夹下的index路径,提供下拉列表功能
67
+ - tab页增加"常见问题解答"(也可参考github-rvc-wiki)
68
+ - 相同路径的输入音频推理增加了音高缓存(用途:使用harvest音高提取,整个pipeline会经历漫长且重复的音高提取过程,如果不使用缓存,实验不同音色、索引、音高中值滤波半径参数的用户在第一次测试后的等待结果会非常痛苦)
69
+
70
+ ### 20230514更新
71
+ - 音量包络对齐输入混合(可以缓解“输入静音输出小幅度噪声”的问题。如果输入音频背景底噪大则不建议开启,默认不开启(值为1可视为不开启))
72
+ - 支持按照指定频率保存提取的小模型(假如你想尝试不同epoch下的推理效果,但是不想保存所有大checkpoint并且每次都要ckpt手工处理提取小模型,这项功能会非常实用)
73
+ - 通过设置环境变量解决服务端开了系统全局代理导致浏览器连接错误的问题
74
+ - 支持v2预训练模型(目前只公开了40k版本进行测试,另外2个采样率还没有训练完全)
75
+ - 推理前限制超过1的过大音量
76
+ - 微调数据预处理参数
77
+
78
+
79
+ ### 20230409更新
80
+ - 修正训练参数,提升显卡平均利用率,A100最高从25%提升至90%左右,V100:50%->90%左右,2060S:60%->85%左右,P40:25%->95%左右,训练速度显著提升
81
+ - 修正参数:总batch_size改为每张卡的batch_size
82
+ - 修正total_epoch:最大限制100解锁至1000;默认10提升至默认20
83
+ - 修复ckpt提取识别是否带音高错误导致推理异常的问题
84
+ - 修复分布式训练每个rank都保存一次ckpt的问题
85
+ - 特征提取进行nan特征过滤
86
+ - 修复静音输入输出随机辅音or噪声的问题(老版模型需要重做训练集重训)
87
+
88
+ ### 20230416更新
89
+ - 新增本地实时变声迷你GUI,双击go-realtime-gui.bat启动
90
+ - 训练推理均对<50Hz的频段进行滤波过滤
91
+ - 训练推理音高提取pyworld最低音高从默认80下降至50,50-80hz间的男声低音不会哑
92
+ - WebUI支持根据系统区域变更语言(现支持en_US,ja_JP,zh_CN,zh_HK,zh_SG,zh_TW,不支持的默认en_US)
93
+ - 修正部分显卡识别(例如V100-16G识别失败,P4识别失败)
94
+
95
+ ### 20230428更新
96
+ - 升级faiss索引设置,速度更快,质量更高
97
+ - 取消total_npy依赖,后续分享模型不再需要填写total_npy
98
+ - 解锁16系限制。4G显存GPU给到4G的推理设置。
99
+ - 修复部分音频格式下UVR5人声伴奏分离的bug
100
+ - 实时变声迷你gui增加对非40k与不带音高模型的支持
101
+
102
+ ### 后续计划:
103
+ 功能:
104
+ - 支持多人训练选项卡(至多4人)
105
+
106
+ 底模:
107
+ - 收集呼吸wav加入训练集修正呼吸变声电音的问题
108
+ - 我们正在训练增加了歌声训练集的底模,未来会公开
109
+