diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..602ae25dbbfaa71459d556435bdf0200ddb137aa 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+BanG-Dream-MyGO/Mutsumi-Chan.gif filter=lfs diff=lfs merge=lfs -text
+Bocchi-the-Rock/Bocchi[[:space:]]Chan.gif filter=lfs diff=lfs merge=lfs -text
+Bocchi-the-Rock/Bocchi-the-Rock.PNG filter=lfs diff=lfs merge=lfs -text
+DATE-A-LIVE/kurumi-tokisaki.gif filter=lfs diff=lfs merge=lfs -text
+Waifu-Anime-RCV/soyo-nagasaki.gif filter=lfs diff=lfs merge=lfs -text
diff --git a/BanG-Dream-MyGO/Dockerfile b/BanG-Dream-MyGO/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..17d810b0d9b25a0c1cff4b35336291194051e592
--- /dev/null
+++ b/BanG-Dream-MyGO/Dockerfile
@@ -0,0 +1,54 @@
+# Use a base image pinned by digest so it matches the Hugging Face cache
+FROM python:3.10@sha256:875c3591e586f66aa65621926230925144920c951902a6c2eef005d9783a7ca7
+
+# Start as root for the initial installs
+USER root
+
+# Install fakeroot and wrap apt-get with it (so the non-root user can still run it), then create the UID 1000 user
+RUN apt-get update && apt-get install -y fakeroot && \
+ mv /usr/bin/apt-get /usr/bin/.apt-get && \
+ echo '#!/usr/bin/env sh\nfakeroot /usr/bin/.apt-get "$@"' > /usr/bin/apt-get && \
+ chmod +x /usr/bin/apt-get && \
+ rm -rf /var/lib/apt/lists/* && \
+ useradd -m -u 1000 user
+
+# Install common dependencies for ML / Gradio / media processing
+RUN apt-get update && apt-get install -y \
+ git \
+ git-lfs \
+ ffmpeg \
+ libsm6 \
+ libxext6 \
+ libgl1-mesa-glx \
+ cmake \
+ rsync \
+ && rm -rf /var/lib/apt/lists/* && \
+ git lfs install
+
+# Switch to the standard Hugging Face user (UID 1000)
+USER user
+ENV HOME=/home/user \
+ PATH=$HOME/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+# Pin pip to version 24.0 explicitly
+RUN pip install --no-cache-dir pip==24.0
+
+# Copy requirements.txt to a temporary location
+COPY --chown=1000:1000 requirements.txt /tmp/pre-requirements.txt
+
+# Install the project's Python dependencies
+RUN pip install --no-cache-dir -r /tmp/pre-requirements.txt
+
+# Copy the entire project source
+COPY --link --chown=1000:1000 . .
+
+# Record all dependencies in a freeze file (for the HF cache)
+RUN pip freeze > /tmp/freeze.txt
+
+# Expose the default Gradio / FastAPI port
+EXPOSE 7860
+
+# Run the Python app
+CMD ["python3", "app.py", "--api"]
\ No newline at end of file
diff --git a/BanG-Dream-MyGO/Mutsumi-Chan.gif b/BanG-Dream-MyGO/Mutsumi-Chan.gif
new file mode 100644
index 0000000000000000000000000000000000000000..83e931f06a44ebfedd9f7e0fc0bb003d7c78f9c9
--- /dev/null
+++ b/BanG-Dream-MyGO/Mutsumi-Chan.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63b6e495cf655cdc86be72163d6161e962a541d05bb6a95a274bbed481388838
+size 1229474
diff --git a/BanG-Dream-MyGO/MyGO.PNG b/BanG-Dream-MyGO/MyGO.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..0d571857aaaea53ef5bfba48a06160716756a645
Binary files /dev/null and b/BanG-Dream-MyGO/MyGO.PNG differ
diff --git a/BanG-Dream-MyGO/app.py b/BanG-Dream-MyGO/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..09c038ace8263f83b2cfe62d7961337af67c941e
--- /dev/null
+++ b/BanG-Dream-MyGO/app.py
@@ -0,0 +1,635 @@
+import os
+import json
+import traceback
+import logging
+import gradio as gr
+import numpy as np
+import librosa
+import torch
+import asyncio
+import edge_tts
+import re
+import shutil
+import time
+from datetime import datetime
+from fairseq import checkpoint_utils
+from fairseq.data.dictionary import Dictionary
+from lib.infer_pack.models import (
+ SynthesizerTrnMs256NSFsid,
+ SynthesizerTrnMs256NSFsid_nono,
+ SynthesizerTrnMs768NSFsid,
+ SynthesizerTrnMs768NSFsid_nono,
+)
+from vc_infer_pipeline import VC
+from config import Config
+
+# =============================
+# LOAD ENVIRONMENT VARIABLES (without dotenv)
+# =============================
+HF_TOKEN = os.getenv("HF_TOKEN")
+if HF_TOKEN:
+ print("🔑 Hugging Face token detected")
+ os.environ["HUGGINGFACE_TOKEN"] = HF_TOKEN
+else:
+ print("⚠️ No HF_TOKEN found")
+
+# =============================
+# AUTOMATIC DOWNLOAD CONFIGURATION FROM THE MODEL REPO
+# =============================
+if not os.path.exists("weights"):
+ print("=" * 50)
+ print("🚀 BANGO DREAM MYGO VOICE CONVERSION")
+ print("=" * 50)
+ print("📥 Mendownload weights dan bahan model dari repo Plana-RCV/BanGDream-MyGO...")
+
+ try:
+ from huggingface_hub import snapshot_download
+
+ repo_id = "Plana-Archive/Premium-Model"
+ print(f"📥 Downloading from: {repo_id}")
+ print("📁 Looking for: BanGDream-MyGO")
+
+        # Download with patterns specific to BanG Dream MyGO
+ downloaded_path = snapshot_download(
+ repo_id=repo_id,
+ allow_patterns=[
+ "BanGDream-MyGO/weights/**",
+ "BanGDream-MyGO/hubert_base.pt",
+ "BanGDream-MyGO/rmvpe.pt"
+ ],
+ local_dir=".",
+ local_dir_use_symlinks=False,
+ token=HF_TOKEN if HF_TOKEN else None,
+ max_workers=2
+ )
+
+ print("✅ Download completed")
+
+        # Move the downloaded files
+ source_dir = "BanGDream-MyGO"
+
+ if os.path.exists(source_dir):
+ print(f"📂 Moving files from: {source_dir}")
+
+            # Move all contents
+ for item in os.listdir(source_dir):
+ s = os.path.join(source_dir, item)
+ d = os.path.join(".", item)
+ if os.path.isdir(s):
+ if os.path.exists(d):
+ shutil.rmtree(d)
+ shutil.move(s, d)
+ else:
+ shutil.move(s, d)
+
+            # Remove the source folder
+ if os.path.exists(source_dir):
+ shutil.rmtree(source_dir)
+
+ print("✅ Files moved successfully")
+
+            # Create folder_info.json if it does not exist
+ folder_info_path = os.path.join("weights", "folder_info.json")
+ if not os.path.exists(folder_info_path):
+ folder_info = {
+ "BanGDream-MyGO": {
+ "title": "BanG Dream! MyGO!!!!!",
+ "folder_path": "BanGDream-MyGO",
+ "description": "Official RVC Weights for BanG Dream! MyGO!!!!! characters",
+ "enable": True
+ }
+ }
+ with open(folder_info_path, "w", encoding="utf-8") as f:
+ json.dump(folder_info, f, indent=2, ensure_ascii=False)
+ print(f"📄 Created folder_info.json")
+
+ else:
+ print("❌ Source directory not found after download!")
+
+ except Exception as e:
+ print(f"⚠️ Download failed: {str(e)}")
+ traceback.print_exc()
+ print("\n📝 Manual setup:")
+ print("1. Create folder: weights/")
+ print("2. Download from: https://huggingface.co/Library-Anime/Plana-RCV/tree/main/BanGDream-MyGO")
+ print("3. Put BanGDream-MyGO folder in weights/")
+
+# Initialize configuration
+config = Config()
+logging.getLogger("numba").setLevel(logging.WARNING)
+logging.getLogger("fairseq").setLevel(logging.WARNING)
+
+# Model cache
+model_cache = {}
+hubert_loaded = False
+hubert_model = None
+
+spaces = True
+if spaces:
+ audio_mode = ["Upload audio", "TTS Audio"]
+else:
+ audio_mode = ["Input path", "Upload audio", "TTS Audio"]
+
+f0method_mode = ["pm", "harvest"]
+if os.path.isfile("rmvpe.pt"):
+ f0method_mode.insert(2, "rmvpe")
+
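+# Strip the franchise prefix and the "- N epochs" suffix from a model title,
+# e.g. "BanG Dream! MyGO!!!!! - Anon - 250 epochs" -> "Anon" (illustrative input).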
+def clean_title(title):
+ title = re.sub(r'^BanG Dream[!]?\s*MyGO[!]*\s*-\s*', '', title, flags=re.IGNORECASE)
+ return re.sub(r'\s*-\s*\d+\s*epochs', '', title, flags=re.IGNORECASE)
+
+# OPTIMIZATION: faster audio processing
+def _load_audio_input(vc_audio_mode, vc_input, vc_upload, tts_text, spaces_limit=20):
+ temp_file = None
+ try:
+ if vc_audio_mode == "Input path" and vc_input:
+            # Use librosa for loading
+ audio, sr = librosa.load(vc_input, sr=16000, mono=True)
+ return audio.astype(np.float32), 16000, None
+
+ elif vc_audio_mode == "Upload audio":
+ if vc_upload is None:
+ raise ValueError("Mohon upload file audio terlebih dahulu!")
+ sampling_rate, audio = vc_upload
+
+            # Convert integer PCM to float32 in [-1, 1]
+            if audio.dtype != np.float32:
+                audio = audio.astype(np.float32) / np.iinfo(audio.dtype).max
+
+            if len(audio.shape) > 1:
+                audio = np.mean(audio, axis=1)  # Gradio audio is (samples, channels); average channels to mono
+
+ if sampling_rate != 16000:
+ audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000, res_type='kaiser_fast')
+
+ return audio.astype(np.float32), 16000, None
+
+ elif vc_audio_mode == "TTS Audio":
+ if not tts_text or tts_text.strip() == "":
+ raise ValueError("Mohon masukkan teks untuk TTS!")
+
+ temp_file = "tts_temp.wav"
+            # Async TTS with a timeout
+ async def tts_task():
+ return await edge_tts.Communicate(tts_text, "ja-JP-NanamiNeural").save(temp_file)
+
+            # Run with a timeout
+ try:
+ asyncio.run(asyncio.wait_for(tts_task(), timeout=10))
+ except asyncio.TimeoutError:
+ raise ValueError("TTS timeout! Silakan coba lagi.")
+
+ audio, sr = librosa.load(temp_file, sr=16000, mono=True)
+ return audio.astype(np.float32), 16000, temp_file
+
+ except Exception as e:
+ if temp_file and os.path.exists(temp_file):
+ os.remove(temp_file)
+ raise e
+
+ raise ValueError("Invalid audio mode or missing input.")
+
+def adjust_audio_speed(audio, speed):
+ if speed == 1.0:
+ return audio
+    # Use a faster method for time stretching
+ return librosa.effects.time_stretch(audio.astype(np.float32), rate=speed)
+
+# OPTIMIZATION: more efficient audio preprocessing
+def preprocess_audio(audio):
+ # Normalize audio
+ if np.max(np.abs(audio)) > 1.0:
+ audio = audio / np.max(np.abs(audio)) * 0.9
+ return audio.astype(np.float32)
+
+# OPTIMIZATION: faster inference pipeline
+def create_vc_fn(model_key, tgt_sr, net_g, vc, if_f0, version, file_index):
+ def vc_fn(
+ vc_audio_mode, vc_input, vc_upload, tts_text,
+ f0_up_key, f0_method, index_rate, filter_radius,
+ resample_sr, rms_mix_rate, protect, speed,
+ ):
+ temp_audio_file = None
+ try:
+            # Clear the GPU cache before starting
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+
+            # Preload the model onto the GPU
+ net_g.to(config.device)
+
+ yield "Status: 🚀 Memproses audio...", None
+
+            # Load the audio (optimized path)
+ audio, sr, temp_audio_file = _load_audio_input(vc_audio_mode, vc_input, vc_upload, tts_text)
+
+ # Preprocess audio
+ audio = preprocess_audio(audio)
+
+            # Convert to a tensor with memory use in mind
+ audio_tensor = torch.FloatTensor(audio).to(config.device)
+
+ times = [0, 0, 0]
+
+            # OPTIMIZATION: process long audio in chunks
+            max_chunk_size = 16000 * 30  # 30 seconds per chunk
+ if len(audio) > max_chunk_size:
+ chunks = []
+ for i in range(0, len(audio), max_chunk_size):
+ chunk = audio[i:i + max_chunk_size]
+ chunk_tensor = torch.FloatTensor(chunk).to(config.device)
+
+ chunk_opt = vc.pipeline(
+ hubert_model, net_g, 0, chunk_tensor,
+ "chunk" if vc_input else "temp", times,
+ int(f0_up_key), f0_method, file_index, index_rate,
+ if_f0, filter_radius, tgt_sr, resample_sr,
+ rms_mix_rate, version, protect, f0_file=None,
+ )
+ chunks.append(chunk_opt)
+
+ audio_opt = np.concatenate(chunks)
+ else:
+                # Process a single chunk
+ audio_opt = vc.pipeline(
+ hubert_model, net_g, 0, audio_tensor,
+ vc_input if vc_input else "temp", times,
+ int(f0_up_key), f0_method, file_index, index_rate,
+ if_f0, filter_radius, tgt_sr, resample_sr,
+ rms_mix_rate, version, protect, f0_file=None,
+ )
+
+            # Ensure audio_opt is float32
+ audio_opt = audio_opt.astype(np.float32)
+
+ # Apply speed adjustment
+ if speed != 1.0:
+ audio_opt = adjust_audio_speed(audio_opt, speed)
+
+            # Normalize the output and keep it float32
+ if np.max(np.abs(audio_opt)) > 0:
+ audio_opt = (audio_opt / np.max(np.abs(audio_opt)) * 0.9).astype(np.float32)
+
+            # Return in the format gradio.Audio expects
+            yield "Status: ✅ Done!", (tgt_sr, audio_opt)
+
+ except Exception as e:
+ yield f"❌ Error: {str(e)}\n\n{traceback.format_exc()}", None
+ finally:
+ # Cleanup
+ if temp_audio_file and os.path.exists(temp_audio_file):
+ os.remove(temp_audio_file)
+
+            # Empty the GPU cache
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+
+            # Move the model back to the CPU to save memory (unless it is cached)
+ if model_key not in model_cache:
+ net_g.to('cpu')
+
+ return vc_fn
+
+def create_model_info_from_files(base_path):
+ """Buat model_info.json berdasarkan file yang sebenarnya ada untuk BanG Dream MyGO"""
+ mygo_dir = os.path.join(base_path, "BanGDream-MyGO")
+ if not os.path.exists(mygo_dir):
+ return
+
+ model_info_path = os.path.join(mygo_dir, "model_info.json")
+
+    # Scan all characters from the subfolders
+ model_info = {}
+
+    # Find all character folders
+ for char_folder in os.listdir(mygo_dir):
+ char_path = os.path.join(mygo_dir, char_folder)
+ if not os.path.isdir(char_path):
+ continue
+
+        # Find the files in each character folder
+ pth_files = [f for f in os.listdir(char_path) if f.endswith('.pth')]
+ index_files = [f for f in os.listdir(char_path) if f.endswith('.index')]
+ image_files = [f for f in os.listdir(char_path) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
+
+ if not pth_files:
+ continue
+
+        # Format the character name for the title
+ char_name_formatted = re.sub(r"([a-z])([A-Z])", r"\1 \2", char_folder)
+
+ model_info[char_folder] = {
+ "enable": True,
+ "model_path": pth_files[0],
+ "title": f"MyGO - {char_name_formatted}",
+ "cover": image_files[0] if image_files else "cover.png",
+ "feature_retrieval_library": index_files[0] if index_files else "",
+ "author": "Plana-Archive"
+ }
+
+ with open(model_info_path, "w", encoding="utf-8") as f:
+ json.dump(model_info, f, indent=2, ensure_ascii=False)
+
+ print(f"✅ Created model_info.json with {len(model_info)} characters")
+ return model_info
+
+def load_model():
+ categories = []
+ base_path = "weights"
+
+ if not os.path.exists(base_path):
+ print(f"❌ Folder '{base_path}' not found!")
+ return categories
+
+    # Read folder_info.json, or create a default one
+ folder_info_path = f"{base_path}/folder_info.json"
+ if not os.path.isfile(folder_info_path):
+ print(f"📄 Creating default folder_info.json...")
+ folder_info = {
+ "BanGDream-MyGO": {
+ "title": "BanG Dream! MyGO!!!!!",
+ "folder_path": "BanGDream-MyGO",
+ "description": "Official RVC Weights for BanG Dream! MyGO!!!!! characters",
+ "enable": True
+ }
+ }
+
+ with open(folder_info_path, "w", encoding="utf-8") as f:
+ json.dump(folder_info, f, indent=2, ensure_ascii=False)
+
+ with open(folder_info_path, "r", encoding="utf-8") as f:
+ folder_info = json.load(f)
+
+ for category_name, category_info in folder_info.items():
+ if not category_info.get('enable', True):
+ continue
+
+ category_title, category_folder, description = (
+ category_info['title'],
+ category_info['folder_path'],
+ category_info['description']
+ )
+
+ models = []
+ model_info_path = f"{base_path}/{category_folder}/model_info.json"
+
+        # If model_info.json is missing, build it from the files on disk
+        if not os.path.exists(model_info_path):
+            print(" ⚠️ model_info.json not found, creating from files...")
+ model_info = create_model_info_from_files(base_path)
+ if not model_info:
+ continue
+
+ if os.path.exists(model_info_path):
+ with open(model_info_path, "r", encoding="utf-8") as f:
+ models_info = json.load(f)
+
+ for character_name, info in models_info.items():
+ if not info.get('enable', True):
+ continue
+
+ model_title, model_name, model_author = (
+ info['title'],
+ info['model_path'],
+ info.get("author")
+ )
+
+                # Build a unique cache key
+ cache_key = f"{category_folder}_{character_name}"
+
+                # Use the cache when available
+ if cache_key in model_cache:
+ tgt_sr, net_g, vc, if_f0, version, model_index = model_cache[cache_key]
+ else:
+ model_cover = f"{base_path}/{category_folder}/{character_name}/{info['cover']}"
+ model_index = f"{base_path}/{category_folder}/{character_name}/{info['feature_retrieval_library']}"
+
+ # Load model weights
+ model_path = f"{base_path}/{category_folder}/{character_name}/{model_name}"
+ cpt = torch.load(model_path, map_location="cpu")
+
+ tgt_sr = cpt["config"][-1]
+ cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
+ if_f0, version = cpt.get("f0", 1), cpt.get("version", "v1")
+
+                    # Initialize the model
+ if version == "v1":
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+ else:
+ net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+ else:
+ if if_f0 == 1:
+ net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+ else:
+ net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+
+ # Load weights
+ if hasattr(net_g, "enc_q"):
+ del net_g.enc_q
+ net_g.load_state_dict(cpt["weight"], strict=False)
+                    net_g.eval().to('cpu')  # Keep it on the CPU for now
+
+                    # Create the VC instance
+ vc = VC(tgt_sr, config)
+
+ # Cache model
+ model_cache[cache_key] = (tgt_sr, net_g, vc, if_f0, version, model_index)
+
+ models.append((
+ character_name, model_title, model_author,
+ f"{base_path}/{category_folder}/{character_name}/{info['cover']}",
+ version,
+ create_vc_fn(cache_key, tgt_sr, net_g, vc, if_f0, version, model_index)
+ ))
+
+ categories.append([category_title, category_folder, description, models])
+
+ return categories
+
+def load_hubert():
+ global hubert_model, hubert_loaded
+ if hubert_loaded:
+ return
+
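+    # Allowlist fairseq's Dictionary so torch.load (weights_only=True by default
+    # in newer PyTorch releases) can unpickle the HuBERT checkpoint.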
+ torch.serialization.add_safe_globals([Dictionary])
+ models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+ ["hubert_base.pt"],
+ suffix="",
+ )
+ hubert_model = models[0].to(config.device)
+ hubert_model = hubert_model.half() if config.is_half else hubert_model.float()
+ hubert_model.eval()
+ hubert_loaded = True
+
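+# Show/hide the input widgets for the selected mode (Gradio 3.x `*.update` API).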
+def change_audio_mode(vc_audio_mode):
+ is_input_path = vc_audio_mode == "Input path"
+ is_upload = vc_audio_mode == "Upload audio"
+ is_tts = vc_audio_mode == "TTS Audio"
+
+ return (
+ gr.Textbox.update(visible=is_input_path),
+ gr.Checkbox.update(visible=is_upload),
+ gr.Audio.update(visible=is_upload),
+ gr.Textbox.update(visible=is_tts, lines=4 if is_tts else 2)
+ )
+
+def use_microphone(microphone):
+ return gr.Audio.update(source="microphone" if microphone else "upload")
+
+# CSS from app (1).py
+css = """
+@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=Quicksand:wght@400;600;700&display=swap');
+body, .gradio-container { background-color: #ffffff !important; font-family: 'Inter', sans-serif !important; }
+footer { display: none !important; }
+.arona-loading-container { display: flex; align-items: center; justify-content: center; gap: 15px; margin-top: 15px; padding: 10px; }
+.loading-text-blue { font-family: 'Quicksand', sans-serif; font-size: 20px; font-weight: 700; color: #00b0ff; letter-spacing: 1px; }
+.loading-gif-small { width: 100px; height: auto; border-radius: 8px; }
+.header-img-container { text-align: center; padding: 10px 0; background: #ffffff !important; }
+.header-img { width: 100%; max-width: 500px; border-radius: 15px; margin: 0 auto; display: block; }
+.status-card { background: #ffffff; border: 1px solid #e1f0ff; border-radius: 14px; padding: 15px 10px; margin: 0 auto 15px auto; max-width: 400px; display: flex; flex-direction: column; align-items: center; }
+.status-online-box { display: flex; align-items: center; gap: 8px; margin-bottom: 12px; }
+.status-details-container { display: flex; width: 100%; justify-content: center; align-items: center; border-top: 1px solid #f0f7ff; padding-top: 10px; }
+.status-detail-item { flex: 1; display: flex; flex-direction: column; align-items: center; text-align: center; }
+.status-detail-item:first-child { border-right: 1px solid #e1f0ff; }
+.status-text-main { font-size: 13px !important; font-weight: 600; color: #546e7a; }
+.status-text-sub { font-size: 11px !important; color: #90a4ae; }
+.dot-online { height: 8px; width: 8px; background-color: #2ecc71; border-radius: 50%; display: inline-block; animation: blink-green 1.5s infinite; }
+@keyframes blink-green { 0% { opacity: 1; } 50% { opacity: 0.4; } 100% { opacity: 1; } }
+.gr-form .gr-block label span, .gr-box label span, .gr-panel label span { background: linear-gradient(135deg, #4fc3f7 0%, #00b0ff 100%) !important; color: white !important; padding: 4px 12px !important; border-radius: 8px !important; font-weight: 600 !important; box-shadow: 0 0 15px rgba(79, 195, 247, 0.4) !important; }
+input[type="range"] { accent-color: #00b0ff !important; }
+.char-scroll-box { display: grid !important; grid-template-columns: repeat(2, 1fr) !important; gap: 12px !important; max-height: 280px; overflow-y: auto; padding: 15px; background: #ffffff; border: 2px solid #eef5ff; border-radius: 14px; }
+.char-card { background: white; padding: 12px; border-radius: 12px; cursor: pointer; border: 1px solid #e1f5fe; border-left: 5px solid #4fc3f7; transition: all 0.2s ease; display: flex; flex-direction: column; height: 65px; }
+.char-name-jp { font-weight: 700; font-size: 11px !important; color: #455a64; }
+.char-name-en { font-size: 8.5px !important; color: #90a4ae; text-transform: uppercase; }
+.speed-section { margin-top: 20px; padding: 18px; border-radius: 20px; background: linear-gradient(135deg, #f0f7ff 0%, #ffffff 100%); border: 2px solid #e1f0ff; }
+.speed-title { font-family: 'Quicksand', sans-serif; font-weight: 700; color: #4ea8de; text-align: center; margin-bottom: 12px; font-size: 14px; }
+.generate-btn { font-family: 'Quicksand', sans-serif; font-weight: 700 !important; background: linear-gradient(135deg, #64b5f6 0%, #2196f3 100%) !important; color: white !important; border-radius: 12px !important; }
+.footer-text { text-align: center; padding: 20px; border-top: 1px solid #f0f4f8; color: #b0bec5; font-size: 11px; }
+.speed-notes-box { font-family: 'Arial'; border: 1px solid #ffd8b2; border-radius: 8px; padding: 12px; background: #fff7ed; border-left: 4px solid #fb923c; margin-top: 10px; }
+.speed-notes-title { color: #c2410c; font-size: 12px; margin: 0 0 5px 0; font-weight: bold; }
+.speed-notes-content { color: #9a3412; font-size: 11px; margin: 0; }
+.video-demo-container { text-align: center; padding: 20px; background: #ffffff; border-radius: 20px; border: 2px solid #e1f0ff; margin: 20px auto; max-width: 800px; }
+.video-demo-title { font-family: 'Quicksand', sans-serif; font-weight: 700; color: #4fc3f7; font-size: 18px; margin-bottom: 15px; }
+.video-demo-player { width: 100%; border-radius: 15px; box-shadow: 0 10px 30px rgba(0, 176, 255, 0.2); }
+"""
+
+if __name__ == '__main__':
+ # Preload hubert model
+ load_hubert()
+
+    # Load models with caching
+ categories = load_model()
+ total_models = sum(len(models) for _, _, _, models in categories)
+
+    # Gradio optimization with a more efficient queue
+ with gr.Blocks(css=css, theme=gr.themes.Soft()) as app:
+        # Header image for BanG Dream MyGO
+        gr.HTML('')
+
+        gr.HTML('''
+        <div class="speed-notes-box">
+            <p class="speed-notes-title">🔖 WARNING, MINNA 🔖</p>
+            <p class="speed-notes-content">After you press Generate Voice, the audio will appear after a few seconds, so just wait!</p>
+        </div>
+        <div class="status-online-box">✅ (ON) YURI MODE - SAKI 💚</div>
+        <div class="video-demo-container">
+            <video class="video-demo-player" controls>
+                Your browser does not support the video tag.
+            </video>
+        </div>
+        ''')
+ with gr.Column(scale=1):
+ pass
+
+ # FOOTER
+ gr.HTML(
+ ''
+ )
+
+    # JAVASCRIPT FOR MODEL SELECTION
+ app.load(
+ None, None, None,
+ js="""
+ () => {
+ window.selectModel = (cat, mod) => {
+ const tabs = document.querySelectorAll('.tabs .tab-nav button');
+ for (let t of tabs) {
+ if (t.textContent.trim() === cat) {
+ t.click();
+ setTimeout(() => {
+ const mTabs = document.querySelectorAll('.tabs .tab-nav button');
+ for (let mt of mTabs) {
+ if (mt.textContent.trim() === mod) {
+ mt.click();
+ window.scrollTo({top: 0, behavior: 'smooth'});
+ }
+ }
+ }, 100);
+ break;
+ }
+ }
+ }
+ }
+ """
+ )
+
+ app.queue(max_size=10).launch(server_name="0.0.0.0", server_port=7860)
\ No newline at end of file
diff --git a/Waifu-Anime-RCV/config.py b/Waifu-Anime-RCV/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6b8c96150d9fcd7e6e21c7a1e48d5ba796b5852
--- /dev/null
+++ b/Waifu-Anime-RCV/config.py
@@ -0,0 +1,99 @@
+import argparse
+import sys
+import torch
+from multiprocessing import cpu_count
+
+class Config:
+ def __init__(self):
+ self.device = "cuda:0"
+ self.is_half = True
+ self.n_cpu = 0
+ self.gpu_name = None
+ self.gpu_mem = None
+ (
+ self.colab,
+ self.api,
+ self.unsupported
+ ) = self.arg_parse()
+ self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+ @staticmethod
+ def arg_parse() -> tuple:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--colab", action="store_true", help="Launch in colab")
+ parser.add_argument("--api", action="store_true", help="Launch with api")
+ parser.add_argument("--unsupported", action="store_true", help="Enable unsupported feature")
+ cmd_opts = parser.parse_args()
+
+ return (
+ cmd_opts.colab,
+ cmd_opts.api,
+ cmd_opts.unsupported
+ )
+
+    # has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
+ # check `getattr` and try it for compatibility
+ @staticmethod
+ def has_mps() -> bool:
+ if not torch.backends.mps.is_available():
+ return False
+ try:
+ torch.zeros(1).to(torch.device("mps"))
+ return True
+ except Exception:
+ return False
+
+ def device_config(self) -> tuple:
+ if torch.cuda.is_available():
+ i_device = int(self.device.split(":")[-1])
+ self.gpu_name = torch.cuda.get_device_name(i_device)
+ if (
+ ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+ or "P40" in self.gpu_name.upper()
+ or "1060" in self.gpu_name
+ or "1070" in self.gpu_name
+ or "1080" in self.gpu_name
+ ):
+ print("INFO: Found GPU", self.gpu_name, ", force to fp32")
+ self.is_half = False
+ else:
+ print("INFO: Found GPU", self.gpu_name)
+ self.gpu_mem = int(
+ torch.cuda.get_device_properties(i_device).total_memory
+ / 1024
+ / 1024
+ / 1024
+ + 0.4
+ )
+ elif self.has_mps():
+ print("INFO: No supported Nvidia GPU found, use MPS instead")
+ self.device = "mps"
+ self.is_half = False
+ else:
+ print("INFO: No supported Nvidia GPU found, use CPU instead")
+ self.device = "cpu"
+ self.is_half = False
+
+ if self.n_cpu == 0:
+ self.n_cpu = cpu_count()
+
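+        # x_pad / x_query / x_center / x_max are pipeline window sizes in seconds
+        # (RVC convention); larger values trade more VRAM for smoother inference.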
+ if self.is_half:
+            # Settings for 6 GB of VRAM
+ x_pad = 3
+ x_query = 10
+ x_center = 60
+ x_max = 65
+ else:
+            # Settings for 5 GB of VRAM
+ x_pad = 1
+ x_query = 6
+ x_center = 38
+ x_max = 41
+
+        if self.gpu_mem is not None and self.gpu_mem <= 4:
+ x_pad = 1
+ x_query = 5
+ x_center = 30
+ x_max = 32
+
+ return x_pad, x_query, x_center, x_max
\ No newline at end of file
diff --git a/Waifu-Anime-RCV/edgetts_db.py b/Waifu-Anime-RCV/edgetts_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..5972c16a6d1bccbcb48157fedaaa1a3763fe7f0b
--- /dev/null
+++ b/Waifu-Anime-RCV/edgetts_db.py
@@ -0,0 +1,232 @@
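+# Display-name -> edge-tts voice ID lookup used by the TTS tab.
+# Minimal usage sketch (assumes the edge_tts package; illustrative only):
+#   import asyncio, edge_tts
+#   voice = tts_order_voice['Japanese-Nanami- (Female)']  # -> 'ja-JP-NanamiNeural'
+#   asyncio.run(edge_tts.Communicate("Hello!", voice).save("out.wav"))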
+tts_order_voice = {
+ 'English-Jenny (Female)': 'en-US-JennyNeural',
+ 'English-Guy (Male)': 'en-US-GuyNeural',
+ 'English-Ana (Female)': 'en-US-AnaNeural',
+ 'English-Aria (Female)': 'en-US-AriaNeural',
+ 'English-Christopher (Male)': 'en-US-ChristopherNeural',
+ 'English-Eric (Male)': 'en-US-EricNeural',
+ 'English-Michelle (Female)': 'en-US-MichelleNeural',
+ 'English-Roger (Male)': 'en-US-RogerNeural',
+ 'Spanish (Mexican)-Dalia (Female)': 'es-MX-DaliaNeural',
+ 'Spanish (Mexican)-Jorge- (Male)': 'es-MX-JorgeNeural',
+ 'Korean-Sun-Hi- (Female)': 'ko-KR-SunHiNeural',
+ 'Korean-InJoon- (Male)': 'ko-KR-InJoonNeural',
+ 'Thai-Premwadee- (Female)': 'th-TH-PremwadeeNeural',
+ 'Thai-Niwat- (Male)': 'th-TH-NiwatNeural',
+ 'Vietnamese-HoaiMy- (Female)': 'vi-VN-HoaiMyNeural',
+ 'Vietnamese-NamMinh- (Male)': 'vi-VN-NamMinhNeural',
+ 'Japanese-Nanami- (Female)': 'ja-JP-NanamiNeural',
+ 'Japanese-Keita- (Male)': 'ja-JP-KeitaNeural',
+ 'French-Denise- (Female)': 'fr-FR-DeniseNeural',
+ 'French-Eloise- (Female)': 'fr-FR-EloiseNeural',
+ 'French-Henri- (Male)': 'fr-FR-HenriNeural',
+ 'Brazilian-Francisca- (Female)': 'pt-BR-FranciscaNeural',
+ 'Brazilian-Antonio- (Male)': 'pt-BR-AntonioNeural',
+ 'Indonesian-Ardi- (Male)': 'id-ID-ArdiNeural',
+ 'Indonesian-Gadis- (Female)': 'id-ID-GadisNeural',
+ 'Hebrew-Avri- (Male)': 'he-IL-AvriNeural',
+ 'Hebrew-Hila- (Female)': 'he-IL-HilaNeural',
+ 'Italian-Isabella- (Female)': 'it-IT-IsabellaNeural',
+ 'Italian-Diego- (Male)': 'it-IT-DiegoNeural',
+ 'Italian-Elsa- (Female)': 'it-IT-ElsaNeural',
+ 'Dutch-Colette- (Female)': 'nl-NL-ColetteNeural',
+ 'Dutch-Fenna- (Female)': 'nl-NL-FennaNeural',
+ 'Dutch-Maarten- (Male)': 'nl-NL-MaartenNeural',
+    'Malay-Osman- (Male)': 'ms-MY-OsmanNeural',
+    'Malay-Yasmin- (Female)': 'ms-MY-YasminNeural',
+ 'Norwegian-Pernille- (Female)': 'nb-NO-PernilleNeural',
+ 'Norwegian-Finn- (Male)': 'nb-NO-FinnNeural',
+ 'Swedish-Sofie- (Female)': 'sv-SE-SofieNeural',
+    'Swedish-Mattias- (Male)': 'sv-SE-MattiasNeural',
+ 'Arabic-Hamed- (Male)': 'ar-SA-HamedNeural',
+ 'Arabic-Zariyah- (Female)': 'ar-SA-ZariyahNeural',
+ 'Greek-Athina- (Female)': 'el-GR-AthinaNeural',
+ 'Greek-Nestoras- (Male)': 'el-GR-NestorasNeural',
+ 'German-Katja- (Female)': 'de-DE-KatjaNeural',
+ 'German-Amala- (Female)': 'de-DE-AmalaNeural',
+ 'German-Conrad- (Male)': 'de-DE-ConradNeural',
+ 'German-Killian- (Male)': 'de-DE-KillianNeural',
+ 'Afrikaans-Adri- (Female)': 'af-ZA-AdriNeural',
+ 'Afrikaans-Willem- (Male)': 'af-ZA-WillemNeural',
+ 'Ethiopian-Ameha- (Male)': 'am-ET-AmehaNeural',
+ 'Ethiopian-Mekdes- (Female)': 'am-ET-MekdesNeural',
+    'Arabic (UAE)-Fatima- (Female)': 'ar-AE-FatimaNeural',
+    'Arabic (UAE)-Hamdan- (Male)': 'ar-AE-HamdanNeural',
+ 'Arabic (Bahrain)-Ali- (Male)': 'ar-BH-AliNeural',
+ 'Arabic (Bahrain)-Laila- (Female)': 'ar-BH-LailaNeural',
+ 'Arabic (Algeria)-Ismael- (Male)': 'ar-DZ-IsmaelNeural',
+ 'Arabic (Egypt)-Salma- (Female)': 'ar-EG-SalmaNeural',
+ 'Arabic (Egypt)-Shakir- (Male)': 'ar-EG-ShakirNeural',
+ 'Arabic (Iraq)-Bassel- (Male)': 'ar-IQ-BasselNeural',
+ 'Arabic (Iraq)-Rana- (Female)': 'ar-IQ-RanaNeural',
+ 'Arabic (Jordan)-Sana- (Female)': 'ar-JO-SanaNeural',
+ 'Arabic (Jordan)-Taim- (Male)': 'ar-JO-TaimNeural',
+ 'Arabic (Kuwait)-Fahed- (Male)': 'ar-KW-FahedNeural',
+ 'Arabic (Kuwait)-Noura- (Female)': 'ar-KW-NouraNeural',
+ 'Arabic (Lebanon)-Layla- (Female)': 'ar-LB-LaylaNeural',
+ 'Arabic (Lebanon)-Rami- (Male)': 'ar-LB-RamiNeural',
+ 'Arabic (Libya)-Iman- (Female)': 'ar-LY-ImanNeural',
+ 'Arabic (Libya)-Omar- (Male)': 'ar-LY-OmarNeural',
+ 'Arabic (Morocco)-Jamal- (Male)': 'ar-MA-JamalNeural',
+ 'Arabic (Morocco)-Mouna- (Female)': 'ar-MA-MounaNeural',
+ 'Arabic (Oman)-Abdullah- (Male)': 'ar-OM-AbdullahNeural',
+ 'Arabic (Oman)-Aysha- (Female)': 'ar-OM-AyshaNeural',
+ 'Arabic (Qatar)-Amal- (Female)': 'ar-QA-AmalNeural',
+ 'Arabic (Qatar)-Moaz- (Male)': 'ar-QA-MoazNeural',
+ 'Arabic (Syrian Arab Republic)-Amany- (Female)': 'ar-SY-AmanyNeural',
+ 'Arabic (Syrian Arab Republic)-Laith- (Male)': 'ar-SY-LaithNeural',
+ 'Arabic (Tunisia)-Hedi- (Male)': 'ar-TN-HediNeural',
+ 'Arabic (Tunisia)-Reem- (Female)': 'ar-TN-ReemNeural',
+    'Arabic (Yemen)-Maryam- (Female)': 'ar-YE-MaryamNeural',
+    'Arabic (Yemen)-Saleh- (Male)': 'ar-YE-SalehNeural',
+ 'Azerbaijani-Babek- (Male)': 'az-AZ-BabekNeural',
+ 'Azerbaijani-Banu- (Female)': 'az-AZ-BanuNeural',
+ 'Bulgarian-Borislav- (Male)': 'bg-BG-BorislavNeural',
+ 'Bulgarian-Kalina- (Female)': 'bg-BG-KalinaNeural',
+ 'Bengali (Bangladesh)-Nabanita- (Female)': 'bn-BD-NabanitaNeural',
+ 'Bengali (Bangladesh)-Pradeep- (Male)': 'bn-BD-PradeepNeural',
+ 'Bengali (India)-Bashkar- (Male)': 'bn-IN-BashkarNeural',
+ 'Bengali (India)-Tanishaa- (Female)': 'bn-IN-TanishaaNeural',
+ 'Bosniak (Bosnia and Herzegovina)-Goran- (Male)': 'bs-BA-GoranNeural',
+ 'Bosniak (Bosnia and Herzegovina)-Vesna- (Female)': 'bs-BA-VesnaNeural',
+ 'Catalan (Spain)-Joana- (Female)': 'ca-ES-JoanaNeural',
+ 'Catalan (Spain)-Enric- (Male)': 'ca-ES-EnricNeural',
+ 'Czech (Czech Republic)-Antonin- (Male)': 'cs-CZ-AntoninNeural',
+ 'Czech (Czech Republic)-Vlasta- (Female)': 'cs-CZ-VlastaNeural',
+ 'Welsh (UK)-Aled- (Male)': 'cy-GB-AledNeural',
+ 'Welsh (UK)-Nia- (Female)': 'cy-GB-NiaNeural',
+ 'Danish (Denmark)-Christel- (Female)': 'da-DK-ChristelNeural',
+ 'Danish (Denmark)-Jeppe- (Male)': 'da-DK-JeppeNeural',
+ 'German (Austria)-Ingrid- (Female)': 'de-AT-IngridNeural',
+ 'German (Austria)-Jonas- (Male)': 'de-AT-JonasNeural',
+ 'German (Switzerland)-Jan- (Male)': 'de-CH-JanNeural',
+ 'German (Switzerland)-Leni- (Female)': 'de-CH-LeniNeural',
+ 'English (Australia)-Natasha- (Female)': 'en-AU-NatashaNeural',
+ 'English (Australia)-William- (Male)': 'en-AU-WilliamNeural',
+ 'English (Canada)-Clara- (Female)': 'en-CA-ClaraNeural',
+ 'English (Canada)-Liam- (Male)': 'en-CA-LiamNeural',
+ 'English (UK)-Libby- (Female)': 'en-GB-LibbyNeural',
+ 'English (UK)-Maisie- (Female)': 'en-GB-MaisieNeural',
+ 'English (UK)-Ryan- (Male)': 'en-GB-RyanNeural',
+ 'English (UK)-Sonia- (Female)': 'en-GB-SoniaNeural',
+ 'English (UK)-Thomas- (Male)': 'en-GB-ThomasNeural',
+ 'English (Hong Kong)-Sam- (Male)': 'en-HK-SamNeural',
+ 'English (Hong Kong)-Yan- (Female)': 'en-HK-YanNeural',
+ 'English (Ireland)-Connor- (Male)': 'en-IE-ConnorNeural',
+ 'English (Ireland)-Emily- (Female)': 'en-IE-EmilyNeural',
+ 'English (India)-Neerja- (Female)': 'en-IN-NeerjaNeural',
+ 'English (India)-Prabhat- (Male)': 'en-IN-PrabhatNeural',
+ 'English (Kenya)-Asilia- (Female)': 'en-KE-AsiliaNeural',
+ 'English (Kenya)-Chilemba- (Male)': 'en-KE-ChilembaNeural',
+ 'English (Nigeria)-Abeo- (Male)': 'en-NG-AbeoNeural',
+ 'English (Nigeria)-Ezinne- (Female)': 'en-NG-EzinneNeural',
+ 'English (New Zealand)-Mitchell- (Male)': 'en-NZ-MitchellNeural',
+ 'English (Philippines)-James- (Male)': 'en-PH-JamesNeural',
+ 'English (Philippines)-Rosa- (Female)': 'en-PH-RosaNeural',
+ 'English (Singapore)-Luna- (Female)': 'en-SG-LunaNeural',
+ 'English (Singapore)-Wayne- (Male)': 'en-SG-WayneNeural',
+ 'English (Tanzania)-Elimu- (Male)': 'en-TZ-ElimuNeural',
+ 'English (Tanzania)-Imani- (Female)': 'en-TZ-ImaniNeural',
+ 'English (South Africa)-Leah- (Female)': 'en-ZA-LeahNeural',
+ 'English (South Africa)-Luke- (Male)': 'en-ZA-LukeNeural',
+ 'Spanish (Argentina)-Elena- (Female)': 'es-AR-ElenaNeural',
+ 'Spanish (Argentina)-Tomas- (Male)': 'es-AR-TomasNeural',
+ 'Spanish (Bolivia)-Marcelo- (Male)': 'es-BO-MarceloNeural',
+ 'Spanish (Bolivia)-Sofia- (Female)': 'es-BO-SofiaNeural',
+ 'Spanish (Colombia)-Gonzalo- (Male)': 'es-CO-GonzaloNeural',
+ 'Spanish (Colombia)-Salome- (Female)': 'es-CO-SalomeNeural',
+ 'Spanish (Costa Rica)-Juan- (Male)': 'es-CR-JuanNeural',
+ 'Spanish (Costa Rica)-Maria- (Female)': 'es-CR-MariaNeural',
+ 'Spanish (Cuba)-Belkys- (Female)': 'es-CU-BelkysNeural',
+ 'Spanish (Dominican Republic)-Emilio- (Male)': 'es-DO-EmilioNeural',
+ 'Spanish (Dominican Republic)-Ramona- (Female)': 'es-DO-RamonaNeural',
+ 'Spanish (Ecuador)-Andrea- (Female)': 'es-EC-AndreaNeural',
+ 'Spanish (Ecuador)-Luis- (Male)': 'es-EC-LuisNeural',
+ 'Spanish (Spain)-Alvaro- (Male)': 'es-ES-AlvaroNeural',
+ 'Spanish (Spain)-Elvira- (Female)': 'es-ES-ElviraNeural',
+ 'Spanish (Equatorial Guinea)-Teresa- (Female)': 'es-GQ-TeresaNeural',
+ 'Spanish (Guatemala)-Andres- (Male)': 'es-GT-AndresNeural',
+ 'Spanish (Guatemala)-Marta- (Female)': 'es-GT-MartaNeural',
+ 'Spanish (Honduras)-Carlos- (Male)': 'es-HN-CarlosNeural',
+ 'Spanish (Honduras)-Karla- (Female)': 'es-HN-KarlaNeural',
+ 'Spanish (Nicaragua)-Federico- (Male)': 'es-NI-FedericoNeural',
+ 'Spanish (Nicaragua)-Yolanda- (Female)': 'es-NI-YolandaNeural',
+ 'Spanish (Panama)-Margarita- (Female)': 'es-PA-MargaritaNeural',
+ 'Spanish (Panama)-Roberto- (Male)': 'es-PA-RobertoNeural',
+ 'Spanish (Peru)-Alex- (Male)': 'es-PE-AlexNeural',
+ 'Spanish (Peru)-Camila- (Female)': 'es-PE-CamilaNeural',
+ 'Spanish (Puerto Rico)-Karina- (Female)': 'es-PR-KarinaNeural',
+ 'Spanish (Puerto Rico)-Victor- (Male)': 'es-PR-VictorNeural',
+ 'Spanish (Paraguay)-Mario- (Male)': 'es-PY-MarioNeural',
+ 'Spanish (Paraguay)-Tania- (Female)': 'es-PY-TaniaNeural',
+ 'Spanish (El Salvador)-Lorena- (Female)': 'es-SV-LorenaNeural',
+ 'Spanish (El Salvador)-Rodrigo- (Male)': 'es-SV-RodrigoNeural',
+ 'Spanish (United States)-Alonso- (Male)': 'es-US-AlonsoNeural',
+ 'Spanish (United States)-Paloma- (Female)': 'es-US-PalomaNeural',
+ 'Spanish (Uruguay)-Mateo- (Male)': 'es-UY-MateoNeural',
+ 'Spanish (Uruguay)-Valentina- (Female)': 'es-UY-ValentinaNeural',
+ 'Spanish (Venezuela)-Paola- (Female)': 'es-VE-PaolaNeural',
+ 'Spanish (Venezuela)-Sebastian- (Male)': 'es-VE-SebastianNeural',
+ 'Estonian (Estonia)-Anu- (Female)': 'et-EE-AnuNeural',
+ 'Estonian (Estonia)-Kert- (Male)': 'et-EE-KertNeural',
+ 'Persian (Iran)-Dilara- (Female)': 'fa-IR-DilaraNeural',
+ 'Persian (Iran)-Farid- (Male)': 'fa-IR-FaridNeural',
+ 'Finnish (Finland)-Harri- (Male)': 'fi-FI-HarriNeural',
+ 'Finnish (Finland)-Noora- (Female)': 'fi-FI-NooraNeural',
+ 'French (Belgium)-Charline- (Female)': 'fr-BE-CharlineNeural',
+ 'French (Belgium)-Gerard- (Male)': 'fr-BE-GerardNeural',
+ 'French (Canada)-Sylvie- (Female)': 'fr-CA-SylvieNeural',
+ 'French (Canada)-Antoine- (Male)': 'fr-CA-AntoineNeural',
+ 'French (Canada)-Jean- (Male)': 'fr-CA-JeanNeural',
+ 'French (Switzerland)-Ariane- (Female)': 'fr-CH-ArianeNeural',
+ 'French (Switzerland)-Fabrice- (Male)': 'fr-CH-FabriceNeural',
+ 'Irish (Ireland)-Colm- (Male)': 'ga-IE-ColmNeural',
+ 'Irish (Ireland)-Orla- (Female)': 'ga-IE-OrlaNeural',
+ 'Galician (Spain)-Roi- (Male)': 'gl-ES-RoiNeural',
+ 'Galician (Spain)-Sabela- (Female)': 'gl-ES-SabelaNeural',
+ 'Gujarati (India)-Dhwani- (Female)': 'gu-IN-DhwaniNeural',
+ 'Gujarati (India)-Niranjan- (Male)': 'gu-IN-NiranjanNeural',
+ 'Hindi (India)-Madhur- (Male)': 'hi-IN-MadhurNeural',
+ 'Hindi (India)-Swara- (Female)': 'hi-IN-SwaraNeural',
+ 'Croatian (Croatia)-Gabrijela- (Female)': 'hr-HR-GabrijelaNeural',
+ 'Croatian (Croatia)-Srecko- (Male)': 'hr-HR-SreckoNeural',
+ 'Hungarian (Hungary)-Noemi- (Female)': 'hu-HU-NoemiNeural',
+ 'Hungarian (Hungary)-Tamas- (Male)': 'hu-HU-TamasNeural',
+ 'Icelandic (Iceland)-Gudrun- (Female)': 'is-IS-GudrunNeural',
+ 'Icelandic (Iceland)-Gunnar- (Male)': 'is-IS-GunnarNeural',
+ 'Javanese (Indonesia)-Dimas- (Male)': 'jv-ID-DimasNeural',
+ 'Javanese (Indonesia)-Siti- (Female)': 'jv-ID-SitiNeural',
+ 'Georgian (Georgia)-Eka- (Female)': 'ka-GE-EkaNeural',
+ 'Georgian (Georgia)-Giorgi- (Male)': 'ka-GE-GiorgiNeural',
+ 'Kazakh (Kazakhstan)-Aigul- (Female)': 'kk-KZ-AigulNeural',
+ 'Kazakh (Kazakhstan)-Daulet- (Male)': 'kk-KZ-DauletNeural',
+ 'Khmer (Cambodia)-Piseth- (Male)': 'km-KH-PisethNeural',
+ 'Khmer (Cambodia)-Sreymom- (Female)': 'km-KH-SreymomNeural',
+ 'Kannada (India)-Gagan- (Male)': 'kn-IN-GaganNeural',
+ 'Kannada (India)-Sapna- (Female)': 'kn-IN-SapnaNeural',
+ 'Lao (Laos)-Chanthavong- (Male)': 'lo-LA-ChanthavongNeural',
+ 'Lao (Laos)-Keomany- (Female)': 'lo-LA-KeomanyNeural',
+ 'Lithuanian (Lithuania)-Leonas- (Male)': 'lt-LT-LeonasNeural',
+ 'Lithuanian (Lithuania)-Ona- (Female)': 'lt-LT-OnaNeural',
+ 'Latvian (Latvia)-Everita- (Female)': 'lv-LV-EveritaNeural',
+ 'Latvian (Latvia)-Nils- (Male)': 'lv-LV-NilsNeural',
+ 'Macedonian (North Macedonia)-Aleksandar- (Male)': 'mk-MK-AleksandarNeural',
+ 'Macedonian (North Macedonia)-Marija- (Female)': 'mk-MK-MarijaNeural',
+ 'Malayalam (India)-Midhun- (Male)': 'ml-IN-MidhunNeural',
+ 'Malayalam (India)-Sobhana- (Female)': 'ml-IN-SobhanaNeural',
+ 'Mongolian (Mongolia)-Bataa- (Male)': 'mn-MN-BataaNeural',
+ 'Mongolian (Mongolia)-Yesui- (Female)': 'mn-MN-YesuiNeural',
+ 'Marathi (India)-Aarohi- (Female)': 'mr-IN-AarohiNeural',
+ 'Marathi (India)-Manohar- (Male)': 'mr-IN-ManoharNeural',
+ 'Maltese (Malta)-Grace- (Female)': 'mt-MT-GraceNeural',
+ 'Maltese (Malta)-Joseph- (Male)': 'mt-MT-JosephNeural',
+ 'Burmese (Myanmar)-Nilar- (Female)': 'my-MM-NilarNeural',
+ 'Burmese (Myanmar)-Thiha- (Male)': 'my-MM-ThihaNeural',
+ 'Nepali (Nepal)-Hemkala- (Female)': 'ne-NP-HemkalaNeural',
+ 'Nepali (Nepal)-Sagar- (Male)': 'ne-NP-SagarNeural',
+ 'Dutch (Belgium)-Arnaud- (Male)': 'nl-BE-ArnaudNeural',
+ 'Dutch (Belgium)-Dena- (Female)': 'nl-BE-DenaNeural',
+ 'Polish (Poland)-Marek- (Male)': 'pl-PL-MarekNeural',
+ 'Polish (Poland)-Zofia- (Female)': 'pl-PL-ZofiaNeural',
+    'Pashto (Afghanistan)-Gul Nawaz- (Male)': 'ps-AF-GulNawazNeural',
+}
\ No newline at end of file
diff --git a/Waifu-Anime-RCV/hubert_base.pt b/Waifu-Anime-RCV/hubert_base.pt
new file mode 100644
index 0000000000000000000000000000000000000000..72f47ab58564f01d5cc8b05c63bdf96d944551ff
--- /dev/null
+++ b/Waifu-Anime-RCV/hubert_base.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f54b40fd2802423a5643779c4861af1e9ee9c1564dc9d32f54f20b5ffba7db96
+size 189507909
diff --git a/Waifu-Anime-RCV/lib/infer_pack/attentions.py b/Waifu-Anime-RCV/lib/infer_pack/attentions.py
new file mode 100644
index 0000000000000000000000000000000000000000..05501be1871643f78dddbeaa529c96667031a8db
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/attentions.py
@@ -0,0 +1,417 @@
+import copy
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from lib.infer_pack import commons
+from lib.infer_pack import modules
+from lib.infer_pack.modules import LayerNorm
+
+
+class Encoder(nn.Module):
+ def __init__(
+ self,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size=1,
+ p_dropout=0.0,
+ window_size=10,
+ **kwargs
+ ):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+
+ self.drop = nn.Dropout(p_dropout)
+ self.attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.attn_layers.append(
+ MultiHeadAttention(
+ hidden_channels,
+ hidden_channels,
+ n_heads,
+ p_dropout=p_dropout,
+ window_size=window_size,
+ )
+ )
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(
+ FFN(
+ hidden_channels,
+ hidden_channels,
+ filter_channels,
+ kernel_size,
+ p_dropout=p_dropout,
+ )
+ )
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask):
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.attn_layers[i](x, x, attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size=1,
+ p_dropout=0.0,
+ proximal_bias=False,
+ proximal_init=True,
+ **kwargs
+ ):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+
+ self.drop = nn.Dropout(p_dropout)
+ self.self_attn_layers = nn.ModuleList()
+ self.norm_layers_0 = nn.ModuleList()
+ self.encdec_attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.self_attn_layers.append(
+ MultiHeadAttention(
+ hidden_channels,
+ hidden_channels,
+ n_heads,
+ p_dropout=p_dropout,
+ proximal_bias=proximal_bias,
+ proximal_init=proximal_init,
+ )
+ )
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
+ self.encdec_attn_layers.append(
+ MultiHeadAttention(
+ hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
+ )
+ )
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(
+ FFN(
+ hidden_channels,
+ hidden_channels,
+ filter_channels,
+ kernel_size,
+ p_dropout=p_dropout,
+ causal=True,
+ )
+ )
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask, h, h_mask):
+ """
+ x: decoder input
+ h: encoder output
+ """
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
+ device=x.device, dtype=x.dtype
+ )
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_0[i](x + y)
+
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class MultiHeadAttention(nn.Module):
+ def __init__(
+ self,
+ channels,
+ out_channels,
+ n_heads,
+ p_dropout=0.0,
+ window_size=None,
+ heads_share=True,
+ block_length=None,
+ proximal_bias=False,
+ proximal_init=False,
+ ):
+ super().__init__()
+ assert channels % n_heads == 0
+
+ self.channels = channels
+ self.out_channels = out_channels
+ self.n_heads = n_heads
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+ self.heads_share = heads_share
+ self.block_length = block_length
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+ self.attn = None
+
+ self.k_channels = channels // n_heads
+ self.conv_q = nn.Conv1d(channels, channels, 1)
+ self.conv_k = nn.Conv1d(channels, channels, 1)
+ self.conv_v = nn.Conv1d(channels, channels, 1)
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
+ self.drop = nn.Dropout(p_dropout)
+
+ if window_size is not None:
+ n_heads_rel = 1 if heads_share else n_heads
+ rel_stddev = self.k_channels**-0.5
+ self.emb_rel_k = nn.Parameter(
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+ * rel_stddev
+ )
+ self.emb_rel_v = nn.Parameter(
+ torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
+ * rel_stddev
+ )
+
+ nn.init.xavier_uniform_(self.conv_q.weight)
+ nn.init.xavier_uniform_(self.conv_k.weight)
+ nn.init.xavier_uniform_(self.conv_v.weight)
+ if proximal_init:
+ with torch.no_grad():
+ self.conv_k.weight.copy_(self.conv_q.weight)
+ self.conv_k.bias.copy_(self.conv_q.bias)
+
+ def forward(self, x, c, attn_mask=None):
+ q = self.conv_q(x)
+ k = self.conv_k(c)
+ v = self.conv_v(c)
+
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+ x = self.conv_o(x)
+ return x
+
+ def attention(self, query, key, value, mask=None):
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
+ b, d, t_s, t_t = (*key.size(), query.size(2))
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+ if self.window_size is not None:
+ assert (
+ t_s == t_t
+ ), "Relative attention is only available for self-attention."
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+ rel_logits = self._matmul_with_relative_keys(
+ query / math.sqrt(self.k_channels), key_relative_embeddings
+ )
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
+ scores = scores + scores_local
+ if self.proximal_bias:
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
+ scores = scores + self._attention_bias_proximal(t_s).to(
+ device=scores.device, dtype=scores.dtype
+ )
+ if mask is not None:
+ scores = scores.masked_fill(mask == 0, -1e4)
+ if self.block_length is not None:
+ assert (
+ t_s == t_t
+ ), "Local attention is only available for self-attention."
+ block_mask = (
+ torch.ones_like(scores)
+ .triu(-self.block_length)
+ .tril(self.block_length)
+ )
+ scores = scores.masked_fill(block_mask == 0, -1e4)
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
+ p_attn = self.drop(p_attn)
+ output = torch.matmul(p_attn, value)
+ if self.window_size is not None:
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
+ value_relative_embeddings = self._get_relative_embeddings(
+ self.emb_rel_v, t_s
+ )
+ output = output + self._matmul_with_relative_values(
+ relative_weights, value_relative_embeddings
+ )
+ output = (
+ output.transpose(2, 3).contiguous().view(b, d, t_t)
+ ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
+ return output, p_attn
+
+ def _matmul_with_relative_values(self, x, y):
+ """
+ x: [b, h, l, m]
+ y: [h or 1, m, d]
+ ret: [b, h, l, d]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0))
+ return ret
+
+ def _matmul_with_relative_keys(self, x, y):
+ """
+ x: [b, h, l, d]
+ y: [h or 1, m, d]
+ ret: [b, h, l, m]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+ return ret
+
+ def _get_relative_embeddings(self, relative_embeddings, length):
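+        # Slice (padding first if needed) the learned [h, 2*window+1, d] table
+        # down to the [h, 2*length-1, d] span needed for length-t self-attention.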
+ max_relative_position = 2 * self.window_size + 1
+ # Pad first before slice to avoid using cond ops.
+ pad_length = max(length - (self.window_size + 1), 0)
+ slice_start_position = max((self.window_size + 1) - length, 0)
+ slice_end_position = slice_start_position + 2 * length - 1
+ if pad_length > 0:
+ padded_relative_embeddings = F.pad(
+ relative_embeddings,
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
+ )
+ else:
+ padded_relative_embeddings = relative_embeddings
+ used_relative_embeddings = padded_relative_embeddings[
+ :, slice_start_position:slice_end_position
+ ]
+ return used_relative_embeddings
+
+ def _relative_position_to_absolute_position(self, x):
+ """
+ x: [b, h, l, 2*l-1]
+ ret: [b, h, l, l]
+ """
+ batch, heads, length, _ = x.size()
+ # Concat columns of pad to shift from relative to absolute indexing.
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
+
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
+ x_flat = x.view([batch, heads, length * 2 * length])
+ x_flat = F.pad(
+ x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
+ )
+
+ # Reshape and slice out the padded elements.
+ x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
+ :, :, :length, length - 1 :
+ ]
+ return x_final
+
+ def _absolute_position_to_relative_position(self, x):
+ """
+ x: [b, h, l, l]
+ ret: [b, h, l, 2*l-1]
+ """
+ batch, heads, length, _ = x.size()
+        # pad along the column dimension
+ x = F.pad(
+ x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
+ )
+ x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
+ # add 0's in the beginning that will skew the elements after reshape
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+ x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
+ return x_final
+
+ def _attention_bias_proximal(self, length):
+ """Bias for self-attention to encourage attention to close positions.
+ Args:
+ length: an integer scalar.
+ Returns:
+ a Tensor with shape [1, 1, length, length]
+ """
+ r = torch.arange(length, dtype=torch.float32)
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+class FFN(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ filter_channels,
+ kernel_size,
+ p_dropout=0.0,
+ activation=None,
+ causal=False,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.activation = activation
+ self.causal = causal
+
+ if causal:
+ self.padding = self._causal_padding
+ else:
+ self.padding = self._same_padding
+
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+ self.drop = nn.Dropout(p_dropout)
+
+ def forward(self, x, x_mask):
+ x = self.conv_1(self.padding(x * x_mask))
+ if self.activation == "gelu":
+ x = x * torch.sigmoid(1.702 * x)
+ else:
+ x = torch.relu(x)
+ x = self.drop(x)
+ x = self.conv_2(self.padding(x * x_mask))
+ return x * x_mask
+
+ def _causal_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = self.kernel_size - 1
+ pad_r = 0
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
+
+ def _same_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = (self.kernel_size - 1) // 2
+ pad_r = self.kernel_size // 2
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
diff --git a/Waifu-Anime-RCV/lib/infer_pack/commons.py b/Waifu-Anime-RCV/lib/infer_pack/commons.py
new file mode 100644
index 0000000000000000000000000000000000000000..54470986f37825b35d90d7efa7437d1c26b87215
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/commons.py
@@ -0,0 +1,166 @@
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+
+def init_weights(m, mean=0.0, std=0.01):
+ classname = m.__class__.__name__
+ if classname.find("Conv") != -1:
+ m.weight.data.normal_(mean, std)
+
+
+def get_padding(kernel_size, dilation=1):
+ return int((kernel_size * dilation - dilation) / 2)
+
+
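+# Reverse the per-dim pad list and flatten it into the format F.pad expects,
+# e.g. [[0, 0], [0, 0], [1, 0]] -> [1, 0, 0, 0, 0, 0].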
+def convert_pad_shape(pad_shape):
+ l = pad_shape[::-1]
+ pad_shape = [item for sublist in l for item in sublist]
+ return pad_shape
+
+
+def kl_divergence(m_p, logs_p, m_q, logs_q):
+ """KL(P||Q)"""
+ kl = (logs_q - logs_p) - 0.5
+ kl += (
+ 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+ )
+ return kl
+
+
+def rand_gumbel(shape):
+ """Sample from the Gumbel distribution, protect from overflows."""
+ uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+ return -torch.log(-torch.log(uniform_samples))
+
+
+def rand_gumbel_like(x):
+ g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+ return g
+
+
+def slice_segments(x, ids_str, segment_size=4):
+ ret = torch.zeros_like(x[:, :, :segment_size])
+ for i in range(x.size(0)):
+ idx_str = ids_str[i]
+ idx_end = idx_str + segment_size
+ ret[i] = x[i, :, idx_str:idx_end]
+ return ret
+
+
+def slice_segments2(x, ids_str, segment_size=4):
+ ret = torch.zeros_like(x[:, :segment_size])
+ for i in range(x.size(0)):
+ idx_str = ids_str[i]
+ idx_end = idx_str + segment_size
+ ret[i] = x[i, idx_str:idx_end]
+ return ret
+
+
+def rand_slice_segments(x, x_lengths=None, segment_size=4):
+ b, d, t = x.size()
+ if x_lengths is None:
+ x_lengths = t
+ ids_str_max = x_lengths - segment_size + 1
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+ ret = slice_segments(x, ids_str, segment_size)
+ return ret, ids_str
+
+
+def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
+ position = torch.arange(length, dtype=torch.float)
+ num_timescales = channels // 2
+ log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
+ num_timescales - 1
+ )
+ inv_timescales = min_timescale * torch.exp(
+ torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
+ )
+ scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+ signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+ signal = F.pad(signal, [0, 0, 0, channels % 2])
+ signal = signal.view(1, channels, length)
+ return signal
+
+
+def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+def subsequent_mask(length):
+ mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+ return mask
+
+
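+# Fused WaveNet-style gate: split the summed activations into tanh and sigmoid
+# halves along the channel dim and multiply them.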
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+ n_channels_int = n_channels[0]
+ in_act = input_a + input_b
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+ acts = t_act * s_act
+ return acts
+
+
+def convert_pad_shape(pad_shape):
+ l = pad_shape[::-1]
+ pad_shape = [item for sublist in l for item in sublist]
+ return pad_shape
+
+
+def shift_1d(x):
+ x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+ return x
+
+
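+# Boolean mask [b, max_len] that is True where position < length,
+# e.g. lengths [2, 4] with max 4 -> [[T, T, F, F], [T, T, T, T]].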
+def sequence_mask(length, max_length=None):
+ if max_length is None:
+ max_length = length.max()
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+ return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+def generate_path(duration, mask):
+ """
+ duration: [b, 1, t_x]
+ mask: [b, 1, t_y, t_x]
+ """
+ device = duration.device
+
+ b, _, t_y, t_x = mask.shape
+ cum_duration = torch.cumsum(duration, -1)
+
+ cum_duration_flat = cum_duration.view(b * t_x)
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+ path = path.view(b, t_x, t_y)
+ path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+ path = path.unsqueeze(1).transpose(2, 3) * mask
+ return path
+
+
+def clip_grad_value_(parameters, clip_value, norm_type=2):
+ if isinstance(parameters, torch.Tensor):
+ parameters = [parameters]
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
+ norm_type = float(norm_type)
+ if clip_value is not None:
+ clip_value = float(clip_value)
+
+ total_norm = 0
+ for p in parameters:
+ param_norm = p.grad.data.norm(norm_type)
+ total_norm += param_norm.item() ** norm_type
+ if clip_value is not None:
+ p.grad.data.clamp_(min=-clip_value, max=clip_value)
+ total_norm = total_norm ** (1.0 / norm_type)
+ return total_norm
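+
+# Usage sketch (illustrative; the optimizer and loss names are assumptions):
+# clip_grad_value_ behaves like torch.nn.utils.clip_grad_value_ but also
+# returns the pre-clip gradient norm, so a training step can log it:
+#
+#     loss.backward()
+#     grad_norm = clip_grad_value_(model.parameters(), clip_value=1.0)
+#     optimizer.step()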
diff --git a/Waifu-Anime-RCV/lib/infer_pack/models.py b/Waifu-Anime-RCV/lib/infer_pack/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..3665d03bc0514a6ed07d3372ea24717dae1e0a65
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/models.py
@@ -0,0 +1,1142 @@
+import math
+
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+
+from lib.infer_pack import attentions, commons, modules
+from lib.infer_pack.commons import init_weights, get_padding
+
+
+class TextEncoder256(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(256, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class TextEncoder768(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(768, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(
+ self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(
+ modules.ResidualCouplingLayer(
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ mean_only=True,
+ )
+ )
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+ def remove_weight_norm(self):
+ for i in range(self.n_flows):
+ self.flows[i * 2].remove_weight_norm()
+
+
+class PosteriorEncoder(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+ def remove_weight_norm(self):
+ self.enc.remove_weight_norm()
+
+
+class Generator(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=0,
+ ):
+ super(Generator, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ def forward(self, x, g=None):
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+class SineGen(torch.nn.Module):
+ """Definition of sine generator
+ SineGen(samp_rate, harmonic_num = 0,
+ sine_amp = 0.1, noise_std = 0.003,
+ voiced_threshold = 0,
+ flag_for_pulse=False)
+ samp_rate: sampling rate in Hz
+ harmonic_num: number of harmonic overtones (default 0)
+ sine_amp: amplitude of sine waveform (default 0.1)
+ noise_std: std of Gaussian noise (default 0.003)
+ voiced_threshold: F0 threshold for U/V classification (default 0)
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
+ Note: when flag_for_pulse is True, the first time step of a voiced
+ segment is always sin(np.pi) or cos(0)
+ """
+
+ def __init__(
+ self,
+ samp_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ noise_std=0.003,
+ voiced_threshold=0,
+ flag_for_pulse=False,
+ ):
+ super(SineGen, self).__init__()
+ self.sine_amp = sine_amp
+ self.noise_std = noise_std
+ self.harmonic_num = harmonic_num
+ self.dim = self.harmonic_num + 1
+ self.sampling_rate = samp_rate
+ self.voiced_threshold = voiced_threshold
+
+ def _f02uv(self, f0):
+ # generate uv signal
+ uv = torch.ones_like(f0)
+ uv = uv * (f0 > self.voiced_threshold)
+ return uv
+
+ def forward(self, f0, upp):
+ """sine_tensor, uv = forward(f0)
+ input F0: tensor(batchsize=1, length, dim=1)
+ f0 for unvoiced steps should be 0
+ output sine_tensor: tensor(batchsize=1, length, dim)
+ output uv: tensor(batchsize=1, length, 1)
+ """
+ with torch.no_grad():
+ f0 = f0[:, None].transpose(1, 2)
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+ # fundamental component
+ f0_buf[:, :, 0] = f0[:, :, 0]
+ for idx in np.arange(self.harmonic_num):
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
+ idx + 2
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
+ rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the per-harmonic products cannot be optimized away in post-processing
+ rand_ini = torch.rand(
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+ )
+ rand_ini[:, 0] = 0
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+ tmp_over_one = torch.cumsum(rad_values, 1)  # a % 1 here would prevent the cumsum below from being optimized further
+ tmp_over_one *= upp
+ tmp_over_one = F.interpolate(
+ tmp_over_one.transpose(2, 1),
+ scale_factor=upp,
+ mode="linear",
+ align_corners=True,
+ ).transpose(2, 1)
+ rad_values = F.interpolate(
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(
+ 2, 1
+ )
+ tmp_over_one %= 1
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+ cumsum_shift = torch.zeros_like(rad_values)
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+ sine_waves = torch.sin(
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
+ )
+ sine_waves = sine_waves * self.sine_amp
+ uv = self._f02uv(f0)
+ uv = F.interpolate(
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(2, 1)
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+ noise = noise_amp * torch.randn_like(sine_waves)
+ sine_waves = sine_waves * uv + noise
+ return sine_waves, uv, noise
+
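+# The forward pass above synthesises each harmonic by phase accumulation:
+# cumulative sums of per-frame frequency ratios (rad_values), with cumsum_shift
+# cancelling the wrap-arounds so float precision survives long signals. A
+# minimal sketch of the same idea for one constant-pitch sine (illustrative,
+# never called in this file):
+#
+#     sr, f0_hz, n = 16000, 220.0, 16000
+#     phase = torch.cumsum(torch.full((n,), f0_hz / sr), dim=0)
+#     wave = torch.sin(2 * np.pi * phase)  # a 220 Hz tone at 16 kHz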
+
+class SourceModuleHnNSF(torch.nn.Module):
+ """SourceModule for hn-nsf
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+ add_noise_std=0.003, voiced_threshod=0)
+ sampling_rate: sampling_rate in Hz
+ harmonic_num: number of harmonic above F0 (default: 0)
+ sine_amp: amplitude of sine source signal (default: 0.1)
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
+ note that amplitude of noise in unvoiced is decided
+ by sine_amp
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+ F0_sampled (batchsize, length, 1)
+ Sine_source (batchsize, length, 1)
+ noise_source (batchsize, length, 1)
+ uv (batchsize, length, 1)
+ """
+
+ def __init__(
+ self,
+ sampling_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ add_noise_std=0.003,
+ voiced_threshod=0,
+ is_half=True,
+ ):
+ super(SourceModuleHnNSF, self).__init__()
+
+ self.sine_amp = sine_amp
+ self.noise_std = add_noise_std
+ self.is_half = is_half
+ # to produce sine waveforms
+ self.l_sin_gen = SineGen(
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
+ )
+
+ # to merge source harmonics into a single excitation
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+ self.l_tanh = torch.nn.Tanh()
+
+ def forward(self, x, upp=None):
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+ if self.is_half:
+ sine_wavs = sine_wavs.half()
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+ return sine_merge, None, None # noise, uv
+
+
+class GeneratorNSF(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels,
+ sr,
+ is_half=False,
+ ):
+ super(GeneratorNSF, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+ self.m_source = SourceModuleHnNSF(
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
+ )
+ self.noise_convs = nn.ModuleList()
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+ if i + 1 < len(upsample_rates):
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
+ self.noise_convs.append(
+ Conv1d(
+ 1,
+ c_cur,
+ kernel_size=stride_f0 * 2,
+ stride=stride_f0,
+ padding=stride_f0 // 2,
+ )
+ )
+ else:
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ self.upp = np.prod(upsample_rates)
+
+ def forward(self, x, f0, g=None):
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
+ har_source = har_source.transpose(1, 2)
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ x_source = self.noise_convs[i](har_source)
+ x = x + x_source
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+sr2sr = {
+ "32k": 32000,
+ "40k": 40000,
+ "48k": 48000,
+}
+
+
+class SynthesizerTrnMs256NSFsid(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ **kwargs
+ ):
+ super().__init__()
+ if isinstance(sr, str):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
+ ):  # ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+ o = self.dec(z_slice, pitchf, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ if rate:
+ head = int(z_p.shape[2] * rate)
+ z_p = z_p[:, :, -head:]
+ x_mask = x_mask[:, :, -head:]
+ nsff0 = nsff0[:, -head:]
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec(z * x_mask, nsff0, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
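+# Inference sketch (illustrative; tensor names and shapes are assumptions):
+# phone is a [b, t, 256] HuBERT-style feature sequence for this 256-d variant,
+# pitch holds coarse pitch ids for emb_pitch, nsff0 the frame-level f0 in Hz,
+# and sid the speaker id:
+#
+#     with torch.no_grad():
+#         audio, _, _ = net_g.infer(phone, phone_lengths, pitch, nsff0, sid)
+#     # optional: rate=0.5 decodes only the trailing half of the latent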
+
+class SynthesizerTrnMs768NSFsid(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ **kwargs
+ ):
+ super().__init__()
+ if isinstance(sr, str):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
+ ):  # ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+ o = self.dec(z_slice, pitchf, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ if rate:
+ head = int(z_p.shape[2] * rate)
+ z_p = z_p[:, :, -head:]
+ x_mask = x_mask[:, :, -head:]
+ nsff0 = nsff0[:, -head:]
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec(z * x_mask, nsff0, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs256NSFsid_nono(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr=None,
+ **kwargs
+ ):
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=False,
+ )
+ self.dec = Generator(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ o = self.dec(z_slice, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, sid, rate=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ if rate:
+ head = int(z_p.shape[2] * rate)
+ z_p = z_p[:, :, -head:]
+ x_mask = x_mask[:, :, -head:]
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec(z * x_mask, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs768NSFsid_nono(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr=None,
+ **kwargs
+ ):
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=False,
+ )
+ self.dec = Generator(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ o = self.dec(z_slice, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, sid, rate=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ if rate:
+ head = int(z_p.shape[2] * rate)
+ z_p = z_p[:, :, -head:]
+ x_mask = x_mask[:, :, -head:]
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec(z * x_mask, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminator, self).__init__()
+ periods = [2, 3, 5, 7, 11, 17]
+ # periods = [3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class MultiPeriodDiscriminatorV2(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminatorV2, self).__init__()
+ # periods = [2, 3, 5, 7, 11, 17]
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ]
+ )
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(
+ Conv2d(
+ 1,
+ 32,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 32,
+ 128,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 128,
+ 512,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 512,
+ 1024,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 1024,
+ 1024,
+ (kernel_size, 1),
+ 1,
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ ]
+ )
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
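+
+# GAN-training sketch (illustrative; names are assumptions): both discriminator
+# stacks score the real waveform y and the generated y_hat, each shaped
+# [b, 1, T], and return per-discriminator logits plus feature maps for the
+# feature-matching loss:
+#
+#     mpd = MultiPeriodDiscriminator()
+#     y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(y, y_hat)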
diff --git a/Waifu-Anime-RCV/lib/infer_pack/models_dml.py b/Waifu-Anime-RCV/lib/infer_pack/models_dml.py
new file mode 100644
index 0000000000000000000000000000000000000000..958d7b29259763d2fea94caf8ba7e314c4a77d05
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/models_dml.py
@@ -0,0 +1,1124 @@
+import math
+
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+
+from lib.infer_pack import attentions, commons, modules
+from lib.infer_pack.commons import init_weights, get_padding
+
+
+class TextEncoder256(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(256, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class TextEncoder768(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(768, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+ if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+ if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(
+ self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(
+ modules.ResidualCouplingLayer(
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ mean_only=True,
+ )
+ )
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+ def remove_weight_norm(self):
+ for i in range(self.n_flows):
+ self.flows[i * 2].remove_weight_norm()
+
+
+class PosteriorEncoder(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+ def remove_weight_norm(self):
+ self.enc.remove_weight_norm()
+
+
+class Generator(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=0,
+ ):
+ super(Generator, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ def forward(self, x, g=None):
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+class SineGen(torch.nn.Module):
+ """Definition of sine generator
+ SineGen(samp_rate, harmonic_num = 0,
+ sine_amp = 0.1, noise_std = 0.003,
+ voiced_threshold = 0,
+ flag_for_pulse=False)
+ samp_rate: sampling rate in Hz
+ harmonic_num: number of harmonic overtones (default 0)
+ sine_amp: amplitude of sine waveform (default 0.1)
+ noise_std: std of Gaussian noise (default 0.003)
+ voiced_threshold: F0 threshold for U/V classification (default 0)
+ flag_for_pulse: this SineGen is used inside PulseGen (default False)
+ Note: when flag_for_pulse is True, the first time step of a voiced
+ segment is always sin(np.pi) or cos(0)
+ """
+
+ def __init__(
+ self,
+ samp_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ noise_std=0.003,
+ voiced_threshold=0,
+ flag_for_pulse=False,
+ ):
+ super(SineGen, self).__init__()
+ self.sine_amp = sine_amp
+ self.noise_std = noise_std
+ self.harmonic_num = harmonic_num
+ self.dim = self.harmonic_num + 1
+ self.sampling_rate = samp_rate
+ self.voiced_threshold = voiced_threshold
+
+ def _f02uv(self, f0):
+ # generate uv signal
+ uv = torch.ones_like(f0)
+ uv = uv * (f0 > self.voiced_threshold)
+ return uv.float()
+
+ def forward(self, f0, upp):
+ """sine_tensor, uv = forward(f0)
+ input F0: tensor(batchsize=1, length, dim=1)
+ f0 for unvoiced steps should be 0
+ output sine_tensor: tensor(batchsize=1, length, dim)
+ output uv: tensor(batchsize=1, length, 1)
+ """
+ with torch.no_grad():
+ f0 = f0[:, None].transpose(1, 2)
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+ # fundamental component
+ f0_buf[:, :, 0] = f0[:, :, 0]
+ for idx in np.arange(self.harmonic_num):
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
+ idx + 2
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
+ rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the per-harmonic products cannot be optimized away in post-processing
+ rand_ini = torch.rand(
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+ )
+ rand_ini[:, 0] = 0
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+ tmp_over_one = torch.cumsum(rad_values, 1)  # a % 1 here would prevent the cumsum below from being optimized further
+ tmp_over_one *= upp
+ tmp_over_one = F.interpolate(
+ tmp_over_one.transpose(2, 1),
+ scale_factor=upp,
+ mode="linear",
+ align_corners=True,
+ ).transpose(2, 1)
+ rad_values = F.interpolate(
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(
+ 2, 1
+ )
+ tmp_over_one %= 1
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+ cumsum_shift = torch.zeros_like(rad_values)
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+ sine_waves = torch.sin(
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
+ )
+ sine_waves = sine_waves * self.sine_amp
+ uv = self._f02uv(f0)
+ uv = F.interpolate(
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(2, 1)
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+ noise = noise_amp * torch.randn_like(sine_waves)
+ sine_waves = sine_waves * uv + noise
+ return sine_waves, uv, noise
+
+
+class SourceModuleHnNSF(torch.nn.Module):
+ """SourceModule for hn-nsf
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+ add_noise_std=0.003, voiced_threshod=0)
+ sampling_rate: sampling_rate in Hz
+ harmonic_num: number of harmonic above F0 (default: 0)
+ sine_amp: amplitude of sine source signal (default: 0.1)
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
+ note that amplitude of noise in unvoiced is decided
+ by sine_amp
+ voiced_threshold: threshold to set U/V given F0 (default: 0)
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+ F0_sampled (batchsize, length, 1)
+ Sine_source (batchsize, length, 1)
+ noise_source (batchsize, length, 1)
+ uv (batchsize, length, 1)
+ """
+
+ def __init__(
+ self,
+ sampling_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ add_noise_std=0.003,
+ voiced_threshod=0,
+ is_half=True,
+ ):
+ super(SourceModuleHnNSF, self).__init__()
+
+ self.sine_amp = sine_amp
+ self.noise_std = add_noise_std
+ self.is_half = is_half
+ # to produce sine waveforms
+ self.l_sin_gen = SineGen(
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
+ )
+
+ # to merge source harmonics into a single excitation
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+ self.l_tanh = torch.nn.Tanh()
+
+ def forward(self, x, upp=None):
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+ if self.is_half:
+ sine_wavs = sine_wavs.half()
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+ return sine_merge, None, None # noise, uv
+
+
+class GeneratorNSF(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels,
+ sr,
+ is_half=False,
+ ):
+ super(GeneratorNSF, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+ self.m_source = SourceModuleHnNSF(
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
+ )
+ self.noise_convs = nn.ModuleList()
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+ if i + 1 < len(upsample_rates):
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
+ self.noise_convs.append(
+ Conv1d(
+ 1,
+ c_cur,
+ kernel_size=stride_f0 * 2,
+ stride=stride_f0,
+ padding=stride_f0 // 2,
+ )
+ )
+ else:
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ self.upp = np.prod(upsample_rates)
+
+ def forward(self, x, f0, g=None):
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
+ har_source = har_source.transpose(1, 2)
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ x_source = self.noise_convs[i](har_source)
+ x = x + x_source
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+sr2sr = {
+ "32k": 32000,
+ "40k": 40000,
+ "48k": 48000,
+}
+
+
+class SynthesizerTrnMs256NSFsid(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ **kwargs
+ ):
+ super().__init__()
+ if isinstance(sr, str):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
+ ):  # ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+ o = self.dec(z_slice, pitchf, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
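+# Note on this DML variant's infer signature (illustrative call; names are
+# assumptions): instead of a trailing `rate`, it takes `max_len` and crops the
+# decoded latent to at most max_len frames before vocoding:
+#
+#     audio, _, _ = net_g.infer(phone, phone_lengths, pitch, nsff0, sid, max_len=200)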
+
+class SynthesizerTrnMs768NSFsid(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ **kwargs
+ ):
+ super().__init__()
+ if isinstance(sr, str):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(
+ self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
+ ):  # ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
+ o = self.dec(z_slice, pitchf, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs256NSFsid_nono(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr=None,
+ **kwargs
+ ):
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=False,
+ )
+ self.dec = Generator(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
+ g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is t, broadcast over time
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ o = self.dec(z_slice, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, sid, max_len=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class SynthesizerTrnMs768NSFsid_nono(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr=None,
+ **kwargs
+ ):
+ super().__init__()
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=False,
+ )
+ self.dec = Generator(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
+        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast over t
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+ z_slice, ids_slice = commons.rand_slice_segments(
+ z, y_lengths, self.segment_size
+ )
+ o = self.dec(z_slice, g=g)
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, phone, phone_lengths, sid, max_len=None):
+ g = self.emb_g(sid).unsqueeze(-1)
+ m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], g=g)
+ return o, x_mask, (z, z_p, m_p, logs_p)
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminator, self).__init__()
+ periods = [2, 3, 5, 7, 11, 17]
+ # periods = [3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+        y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class MultiPeriodDiscriminatorV2(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminatorV2, self).__init__()
+ # periods = [2, 3, 5, 7, 11, 17]
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+        y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+        norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ]
+ )
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+        norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(
+ Conv2d(
+ 1,
+ 32,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 32,
+ 128,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 128,
+ 512,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 512,
+ 1024,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 1024,
+ 1024,
+ (kernel_size, 1),
+ 1,
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ ]
+ )
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
diff --git a/Waifu-Anime-RCV/lib/infer_pack/models_onnx.py b/Waifu-Anime-RCV/lib/infer_pack/models_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..963e67b29f828e9fdd096397952054fe77cf3d10
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/models_onnx.py
@@ -0,0 +1,819 @@
+import math
+import torch
+import numpy as np
+from torch import nn
+from torch.nn import functional as F
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from lib.infer_pack import modules
+from lib.infer_pack import attentions
+from lib.infer_pack import commons
+from lib.infer_pack.commons import init_weights, get_padding
+
+
+class TextEncoder256(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(256, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+        if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+        if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class TextEncoder768(nn.Module):
+ def __init__(
+ self,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ f0=True,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.emb_phone = nn.Linear(768, hidden_channels)
+ self.lrelu = nn.LeakyReLU(0.1, inplace=True)
+        if f0:
+ self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
+ self.encoder = attentions.Encoder(
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, phone, pitch, lengths):
+        if pitch is None:
+ x = self.emb_phone(phone)
+ else:
+ x = self.emb_phone(phone) + self.emb_pitch(pitch)
+ x = x * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = self.lrelu(x)
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(
+ self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(
+ modules.ResidualCouplingLayer(
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ mean_only=True,
+ )
+ )
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+ def remove_weight_norm(self):
+ for i in range(self.n_flows):
+ self.flows[i * 2].remove_weight_norm()
+
+
+class PosteriorEncoder(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=gin_channels,
+ )
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
+ x.dtype
+ )
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+ def remove_weight_norm(self):
+ self.enc.remove_weight_norm()
+
+
+class Generator(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=0,
+ ):
+ super(Generator, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ def forward(self, x, g=None):
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+class SineGen(torch.nn.Module):
+ """Definition of sine generator
+ SineGen(samp_rate, harmonic_num = 0,
+ sine_amp = 0.1, noise_std = 0.003,
+ voiced_threshold = 0,
+ flag_for_pulse=False)
+ samp_rate: sampling rate in Hz
+ harmonic_num: number of harmonic overtones (default 0)
+    sine_amp: amplitude of sine waveform (default 0.1)
+    noise_std: std of Gaussian noise (default 0.003)
+    voiced_threshold: F0 threshold for U/V classification (default 0)
+    flag_for_pulse: this SineGen is used inside PulseGen (default False)
+ Note: when flag_for_pulse is True, the first time step of a voiced
+ segment is always sin(np.pi) or cos(0)
+ """
+
+ def __init__(
+ self,
+ samp_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ noise_std=0.003,
+ voiced_threshold=0,
+ flag_for_pulse=False,
+ ):
+ super(SineGen, self).__init__()
+ self.sine_amp = sine_amp
+ self.noise_std = noise_std
+ self.harmonic_num = harmonic_num
+ self.dim = self.harmonic_num + 1
+ self.sampling_rate = samp_rate
+ self.voiced_threshold = voiced_threshold
+
+ def _f02uv(self, f0):
+ # generate uv signal
+ uv = torch.ones_like(f0)
+ uv = uv * (f0 > self.voiced_threshold)
+ return uv
+
+ def forward(self, f0, upp):
+ """sine_tensor, uv = forward(f0)
+ input F0: tensor(batchsize=1, length, dim=1)
+ f0 for unvoiced steps should be 0
+ output sine_tensor: tensor(batchsize=1, length, dim)
+ output uv: tensor(batchsize=1, length, 1)
+ """
+ with torch.no_grad():
+ f0 = f0[:, None].transpose(1, 2)
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
+ # fundamental component
+ f0_buf[:, :, 0] = f0[:, :, 0]
+ for idx in np.arange(self.harmonic_num):
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
+ idx + 2
+ ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
+            rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the harmonic products cannot be optimized away in post-processing
+ rand_ini = torch.rand(
+ f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+ )
+ rand_ini[:, 0] = 0
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
+            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # a % 1 here would mean the cumsum below could no longer be optimized
+ tmp_over_one *= upp
+ tmp_over_one = F.interpolate(
+ tmp_over_one.transpose(2, 1),
+ scale_factor=upp,
+ mode="linear",
+ align_corners=True,
+ ).transpose(2, 1)
+ rad_values = F.interpolate(
+ rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(
+ 2, 1
+            )
+ tmp_over_one %= 1
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
+ cumsum_shift = torch.zeros_like(rad_values)
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
+ sine_waves = torch.sin(
+ torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
+ )
+ sine_waves = sine_waves * self.sine_amp
+ uv = self._f02uv(f0)
+ uv = F.interpolate(
+ uv.transpose(2, 1), scale_factor=upp, mode="nearest"
+ ).transpose(2, 1)
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
+ noise = noise_amp * torch.randn_like(sine_waves)
+ sine_waves = sine_waves * uv + noise
+ return sine_waves, uv, noise
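+
+# A minimal usage sketch (illustrative comment only; values are hypothetical):
+#     >>> gen = SineGen(samp_rate=16000, harmonic_num=2)
+#     >>> f0 = torch.full((1, 100), 220.0)   # a constant 220 Hz F0 track
+#     >>> sine, uv, noise = gen(f0, upp=1)   # sine: [1, 100, 3]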
+
+
+class SourceModuleHnNSF(torch.nn.Module):
+ """SourceModule for hn-nsf
+ SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
+ add_noise_std=0.003, voiced_threshod=0)
+ sampling_rate: sampling_rate in Hz
+ harmonic_num: number of harmonic above F0 (default: 0)
+ sine_amp: amplitude of sine source signal (default: 0.1)
+ add_noise_std: std of additive Gaussian noise (default: 0.003)
+ note that amplitude of noise in unvoiced is decided
+ by sine_amp
+    voiced_threshold: threshold to set U/V given F0 (default: 0)
+ Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
+ F0_sampled (batchsize, length, 1)
+ Sine_source (batchsize, length, 1)
+    noise_source (batchsize, length, 1)
+ uv (batchsize, length, 1)
+ """
+
+ def __init__(
+ self,
+ sampling_rate,
+ harmonic_num=0,
+ sine_amp=0.1,
+ add_noise_std=0.003,
+ voiced_threshod=0,
+ is_half=True,
+ ):
+ super(SourceModuleHnNSF, self).__init__()
+
+ self.sine_amp = sine_amp
+ self.noise_std = add_noise_std
+ self.is_half = is_half
+ # to produce sine waveforms
+ self.l_sin_gen = SineGen(
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
+ )
+
+ # to merge source harmonics into a single excitation
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
+ self.l_tanh = torch.nn.Tanh()
+
+ def forward(self, x, upp=None):
+ sine_wavs, uv, _ = self.l_sin_gen(x, upp)
+ if self.is_half:
+ sine_wavs = sine_wavs.half()
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
+ return sine_merge, None, None # noise, uv
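+
+# A minimal usage sketch (illustrative comment; shapes follow the docstring):
+#     >>> src = SourceModuleHnNSF(sampling_rate=16000, is_half=False)
+#     >>> f0 = torch.full((1, 100), 220.0)
+#     >>> sine_merge, _, _ = src(f0, upp=1)  # sine_merge: [1, 100, 1]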
+
+
+class GeneratorNSF(torch.nn.Module):
+ def __init__(
+ self,
+ initial_channel,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels,
+ sr,
+ is_half=False,
+ ):
+ super(GeneratorNSF, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
+ self.m_source = SourceModuleHnNSF(
+ sampling_rate=sr, harmonic_num=0, is_half=is_half
+ )
+ self.noise_convs = nn.ModuleList()
+ self.conv_pre = Conv1d(
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
+ )
+ resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ c_cur = upsample_initial_channel // (2 ** (i + 1))
+ self.ups.append(
+ weight_norm(
+ ConvTranspose1d(
+ upsample_initial_channel // (2**i),
+ upsample_initial_channel // (2 ** (i + 1)),
+ k,
+ u,
+ padding=(k - u) // 2,
+ )
+ )
+ )
+ if i + 1 < len(upsample_rates):
+ stride_f0 = np.prod(upsample_rates[i + 1 :])
+ self.noise_convs.append(
+ Conv1d(
+ 1,
+ c_cur,
+ kernel_size=stride_f0 * 2,
+ stride=stride_f0,
+ padding=stride_f0 // 2,
+ )
+ )
+ else:
+ self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel // (2 ** (i + 1))
+ for j, (k, d) in enumerate(
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
+ ):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ self.upp = np.prod(upsample_rates)
+
+ def forward(self, x, f0, g=None):
+ har_source, noi_source, uv = self.m_source(f0, self.upp)
+ har_source = har_source.transpose(1, 2)
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ x_source = self.noise_convs[i](har_source)
+ x = x + x_source
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i * self.num_kernels + j](x)
+ else:
+ xs += self.resblocks[i * self.num_kernels + j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+sr2sr = {
+ "32k": 32000,
+ "40k": 40000,
+ "48k": 48000,
+}
+
+
+class SynthesizerTrnMsNSFsidM(nn.Module):
+ def __init__(
+ self,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ spk_embed_dim,
+ gin_channels,
+ sr,
+ version,
+ **kwargs
+ ):
+ super().__init__()
+        if isinstance(sr, str):
+ sr = sr2sr[sr]
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.gin_channels = gin_channels
+ # self.hop_length = hop_length#
+ self.spk_embed_dim = spk_embed_dim
+ if version == "v1":
+ self.enc_p = TextEncoder256(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ else:
+ self.enc_p = TextEncoder768(
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ )
+ self.dec = GeneratorNSF(
+ inter_channels,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ gin_channels=gin_channels,
+ sr=sr,
+ is_half=kwargs["is_half"],
+ )
+ self.enc_q = PosteriorEncoder(
+ spec_channels,
+ inter_channels,
+ hidden_channels,
+ 5,
+ 1,
+ 16,
+ gin_channels=gin_channels,
+ )
+ self.flow = ResidualCouplingBlock(
+ inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
+ )
+ self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
+ self.speaker_map = None
+ print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
+
+ def remove_weight_norm(self):
+ self.dec.remove_weight_norm()
+ self.flow.remove_weight_norm()
+ self.enc_q.remove_weight_norm()
+
+ def construct_spkmixmap(self, n_speaker):
+ self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
+ for i in range(n_speaker):
+ self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
+ self.speaker_map = self.speaker_map.unsqueeze(0)
+
+ def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
+ if self.speaker_map is not None: # [N, S] * [S, B, 1, H]
+ g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
+ g = g * self.speaker_map # [N, S, B, 1, H]
+ g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
+ g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
+ else:
+ g = g.unsqueeze(0)
+ g = self.emb_g(g).transpose(1, 2)
+
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
+ z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
+ o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
+ return o
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminator, self).__init__()
+ periods = [2, 3, 5, 7, 11, 17]
+ # periods = [3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+        y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class MultiPeriodDiscriminatorV2(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminatorV2, self).__init__()
+ # periods = [2, 3, 5, 7, 11, 17]
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [
+ DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
+ ]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+        y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ # for j in range(len(fmap_r)):
+ # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+        norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ]
+ )
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+        norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList(
+ [
+ norm_f(
+ Conv2d(
+ 1,
+ 32,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 32,
+ 128,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 128,
+ 512,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 512,
+ 1024,
+ (kernel_size, 1),
+ (stride, 1),
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ norm_f(
+ Conv2d(
+ 1024,
+ 1024,
+ (kernel_size, 1),
+ 1,
+ padding=(get_padding(kernel_size, 1), 0),
+ )
+ ),
+ ]
+ )
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
diff --git a/Waifu-Anime-RCV/lib/infer_pack/modules.py b/Waifu-Anime-RCV/lib/infer_pack/modules.py
new file mode 100644
index 0000000000000000000000000000000000000000..c83289df7c79a4810dacd15c050148544ba0b6a9
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/modules.py
@@ -0,0 +1,522 @@
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm
+
+from lib.infer_pack import commons
+from lib.infer_pack.commons import init_weights, get_padding
+from lib.infer_pack.transforms import piecewise_rational_quadratic_transform
+
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+ def __init__(self, channels, eps=1e-5):
+ super().__init__()
+ self.channels = channels
+ self.eps = eps
+
+ self.gamma = nn.Parameter(torch.ones(channels))
+ self.beta = nn.Parameter(torch.zeros(channels))
+
+ def forward(self, x):
+ x = x.transpose(1, -1)
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+ return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ hidden_channels,
+ out_channels,
+ kernel_size,
+ n_layers,
+ p_dropout,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.hidden_channels = hidden_channels
+ self.out_channels = out_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+        assert n_layers > 1, "Number of layers should be larger than 1."
+
+ self.conv_layers = nn.ModuleList()
+ self.norm_layers = nn.ModuleList()
+ self.conv_layers.append(
+ nn.Conv1d(
+ in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
+ )
+ )
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
+ for _ in range(n_layers - 1):
+ self.conv_layers.append(
+ nn.Conv1d(
+ hidden_channels,
+ hidden_channels,
+ kernel_size,
+ padding=kernel_size // 2,
+ )
+ )
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+ self.proj.weight.data.zero_()
+ self.proj.bias.data.zero_()
+
+ def forward(self, x, x_mask):
+ x_org = x
+ for i in range(self.n_layers):
+ x = self.conv_layers[i](x * x_mask)
+ x = self.norm_layers[i](x)
+ x = self.relu_drop(x)
+ x = x_org + self.proj(x)
+ return x * x_mask
+
+
+class DDSConv(nn.Module):
+ """
+    Dilated and Depth-Separable Convolution
+ """
+
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
+ super().__init__()
+ self.channels = channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+
+ self.drop = nn.Dropout(p_dropout)
+ self.convs_sep = nn.ModuleList()
+ self.convs_1x1 = nn.ModuleList()
+ self.norms_1 = nn.ModuleList()
+ self.norms_2 = nn.ModuleList()
+ for i in range(n_layers):
+ dilation = kernel_size**i
+ padding = (kernel_size * dilation - dilation) // 2
+ self.convs_sep.append(
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ groups=channels,
+ dilation=dilation,
+ padding=padding,
+ )
+ )
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+ self.norms_1.append(LayerNorm(channels))
+ self.norms_2.append(LayerNorm(channels))
+
+ def forward(self, x, x_mask, g=None):
+ if g is not None:
+ x = x + g
+ for i in range(self.n_layers):
+ y = self.convs_sep[i](x * x_mask)
+ y = self.norms_1[i](y)
+ y = F.gelu(y)
+ y = self.convs_1x1[i](y)
+ y = self.norms_2[i](y)
+ y = F.gelu(y)
+ y = self.drop(y)
+ x = x + y
+ return x * x_mask
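+
+# A minimal usage sketch (illustrative comment): three depthwise layers with
+# dilations 1, 3 and 9; the mask keeps padded frames at zero between layers.
+#     >>> conv = DDSConv(channels=64, kernel_size=3, n_layers=3)
+#     >>> x, mask = torch.randn(2, 64, 50), torch.ones(2, 1, 50)
+#     >>> y = conv(x, mask)  # [2, 64, 50]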
+
+
+class WN(torch.nn.Module):
+ def __init__(
+ self,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0,
+ p_dropout=0,
+ ):
+ super(WN, self).__init__()
+ assert kernel_size % 2 == 1
+ self.hidden_channels = hidden_channels
+ self.kernel_size = (kernel_size,)
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+ self.p_dropout = p_dropout
+
+ self.in_layers = torch.nn.ModuleList()
+ self.res_skip_layers = torch.nn.ModuleList()
+ self.drop = nn.Dropout(p_dropout)
+
+ if gin_channels != 0:
+ cond_layer = torch.nn.Conv1d(
+ gin_channels, 2 * hidden_channels * n_layers, 1
+ )
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
+
+ for i in range(n_layers):
+ dilation = dilation_rate**i
+ padding = int((kernel_size * dilation - dilation) / 2)
+ in_layer = torch.nn.Conv1d(
+ hidden_channels,
+ 2 * hidden_channels,
+ kernel_size,
+ dilation=dilation,
+ padding=padding,
+ )
+ in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
+ self.in_layers.append(in_layer)
+
+ # last one is not necessary
+ if i < n_layers - 1:
+ res_skip_channels = 2 * hidden_channels
+ else:
+ res_skip_channels = hidden_channels
+
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+ res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
+ self.res_skip_layers.append(res_skip_layer)
+
+ def forward(self, x, x_mask, g=None, **kwargs):
+ output = torch.zeros_like(x)
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+ if g is not None:
+ g = self.cond_layer(g)
+
+ for i in range(self.n_layers):
+ x_in = self.in_layers[i](x)
+ if g is not None:
+ cond_offset = i * 2 * self.hidden_channels
+ g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
+ else:
+ g_l = torch.zeros_like(x_in)
+
+ acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
+ acts = self.drop(acts)
+
+ res_skip_acts = self.res_skip_layers[i](acts)
+ if i < self.n_layers - 1:
+ res_acts = res_skip_acts[:, : self.hidden_channels, :]
+ x = (x + res_acts) * x_mask
+ output = output + res_skip_acts[:, self.hidden_channels :, :]
+ else:
+ output = output + res_skip_acts
+ return output * x_mask
+
+ def remove_weight_norm(self):
+ if self.gin_channels != 0:
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
+ for l in self.in_layers:
+ torch.nn.utils.remove_weight_norm(l)
+ for l in self.res_skip_layers:
+ torch.nn.utils.remove_weight_norm(l)
+
+
+class ResBlock1(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+ super(ResBlock1, self).__init__()
+ self.convs1 = nn.ModuleList(
+ [
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1]),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[2],
+ padding=get_padding(kernel_size, dilation[2]),
+ )
+ ),
+ ]
+ )
+ self.convs1.apply(init_weights)
+
+ self.convs2 = nn.ModuleList(
+ [
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=get_padding(kernel_size, 1),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=get_padding(kernel_size, 1),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=get_padding(kernel_size, 1),
+ )
+ ),
+ ]
+ )
+ self.convs2.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c1, c2 in zip(self.convs1, self.convs2):
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c1(xt)
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c2(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs1:
+ remove_weight_norm(l)
+ for l in self.convs2:
+ remove_weight_norm(l)
+
+
+class ResBlock2(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+ super(ResBlock2, self).__init__()
+ self.convs = nn.ModuleList(
+ [
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]),
+ )
+ ),
+ weight_norm(
+ Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1]),
+ )
+ ),
+ ]
+ )
+ self.convs.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c in self.convs:
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs:
+ remove_weight_norm(l)
+
+
+class Log(nn.Module):
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+ logdet = torch.sum(-y, [1, 2])
+ return y, logdet
+ else:
+ x = torch.exp(x) * x_mask
+ return x
+
+
+class Flip(nn.Module):
+ def forward(self, x, *args, reverse=False, **kwargs):
+ x = torch.flip(x, [1])
+ if not reverse:
+ logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+ return x, logdet
+ else:
+ return x
+
+
+class ElementwiseAffine(nn.Module):
+ def __init__(self, channels):
+ super().__init__()
+ self.channels = channels
+ self.m = nn.Parameter(torch.zeros(channels, 1))
+ self.logs = nn.Parameter(torch.zeros(channels, 1))
+
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = self.m + torch.exp(self.logs) * x
+ y = y * x_mask
+ logdet = torch.sum(self.logs * x_mask, [1, 2])
+ return y, logdet
+ else:
+ x = (x - self.m) * torch.exp(-self.logs) * x_mask
+ return x
+
+
+class ResidualCouplingLayer(nn.Module):
+ def __init__(
+ self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ p_dropout=0,
+ gin_channels=0,
+ mean_only=False,
+ ):
+ assert channels % 2 == 0, "channels should be divisible by 2"
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.half_channels = channels // 2
+ self.mean_only = mean_only
+
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+ self.enc = WN(
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ p_dropout=p_dropout,
+ gin_channels=gin_channels,
+ )
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+ self.post.weight.data.zero_()
+ self.post.bias.data.zero_()
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+ h = self.pre(x0) * x_mask
+ h = self.enc(h, x_mask, g=g)
+ stats = self.post(h) * x_mask
+ if not self.mean_only:
+ m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+ else:
+ m = stats
+ logs = torch.zeros_like(m)
+
+ if not reverse:
+ x1 = m + x1 * torch.exp(logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ logdet = torch.sum(logs, [1, 2])
+ return x, logdet
+ else:
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ return x
+
+ def remove_weight_norm(self):
+ self.enc.remove_weight_norm()
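+
+# A minimal usage sketch (illustrative comment): the coupling is exactly
+# invertible, so reverse=True recovers the input up to float precision.
+#     >>> layer = ResidualCouplingLayer(192, 192, 5, 1, 3, mean_only=True)
+#     >>> x, mask = torch.randn(1, 192, 20), torch.ones(1, 1, 20)
+#     >>> y, logdet = layer(x, mask)
+#     >>> x_rec = layer(y, mask, reverse=True)  # ~= x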
+
+
+class ConvFlow(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ filter_channels,
+ kernel_size,
+ n_layers,
+ num_bins=10,
+ tail_bound=5.0,
+ ):
+ super().__init__()
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.num_bins = num_bins
+ self.tail_bound = tail_bound
+ self.half_channels = in_channels // 2
+
+ self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+ self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
+ self.proj = nn.Conv1d(
+ filter_channels, self.half_channels * (num_bins * 3 - 1), 1
+ )
+ self.proj.weight.data.zero_()
+ self.proj.bias.data.zero_()
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+ h = self.pre(x0)
+ h = self.convs(h, x_mask, g=g)
+ h = self.proj(h) * x_mask
+
+ b, c, t = x0.shape
+        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]
+
+ unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
+ unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
+ self.filter_channels
+ )
+ unnormalized_derivatives = h[..., 2 * self.num_bins :]
+
+ x1, logabsdet = piecewise_rational_quadratic_transform(
+ x1,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=reverse,
+ tails="linear",
+ tail_bound=self.tail_bound,
+ )
+
+ x = torch.cat([x0, x1], 1) * x_mask
+ logdet = torch.sum(logabsdet * x_mask, [1, 2])
+ if not reverse:
+ return x, logdet
+ else:
+ return x
diff --git a/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py b/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee3171bcb7c4a5066560723108b56e055f18be45
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py
@@ -0,0 +1,90 @@
+from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+import pyworld
+import numpy as np
+
+
+class DioF0Predictor(F0Predictor):
+ def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+ self.hop_length = hop_length
+ self.f0_min = f0_min
+ self.f0_max = f0_max
+ self.sampling_rate = sampling_rate
+
+ def interpolate_f0(self, f0):
+ """
+        Interpolate the F0 contour over unvoiced frames
+ """
+
+ data = np.reshape(f0, (f0.size, 1))
+
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+ vuv_vector[data > 0.0] = 1.0
+ vuv_vector[data <= 0.0] = 0.0
+
+ ip_data = data
+
+ frame_number = data.size
+ last_value = 0.0
+ for i in range(frame_number):
+ if data[i] <= 0.0:
+ j = i + 1
+ for j in range(i + 1, frame_number):
+ if data[j] > 0.0:
+ break
+ if j < frame_number - 1:
+ if last_value > 0.0:
+ step = (data[j] - data[i - 1]) / float(j - i)
+ for k in range(i, j):
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
+ else:
+ for k in range(i, j):
+ ip_data[k] = data[j]
+ else:
+ for k in range(i, frame_number):
+ ip_data[k] = last_value
+ else:
+                ip_data[i] = data[i]  # possibly an unnecessary copy here
+ last_value = data[i]
+
+ return ip_data[:, 0], vuv_vector[:, 0]
+
+ def resize_f0(self, x, target_len):
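+        # Unvoiced frames (f0 ~ 0) are masked to NaN so that, after the linear
+        # resampling below, they come back as NaN rather than dragging nearby
+        # pitch values toward zero; nan_to_num then restores them to 0.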
+ source = np.array(x)
+ source[source < 0.001] = np.nan
+ target = np.interp(
+ np.arange(0, len(source) * target_len, len(source)) / target_len,
+ np.arange(0, len(source)),
+ source,
+ )
+ res = np.nan_to_num(target)
+ return res
+
+ def compute_f0(self, wav, p_len=None):
+ if p_len is None:
+ p_len = wav.shape[0] // self.hop_length
+ f0, t = pyworld.dio(
+ wav.astype(np.double),
+ fs=self.sampling_rate,
+ f0_floor=self.f0_min,
+ f0_ceil=self.f0_max,
+ frame_period=1000 * self.hop_length / self.sampling_rate,
+ )
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+ for index, pitch in enumerate(f0):
+ f0[index] = round(pitch, 1)
+ return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+ def compute_f0_uv(self, wav, p_len=None):
+ if p_len is None:
+ p_len = wav.shape[0] // self.hop_length
+ f0, t = pyworld.dio(
+ wav.astype(np.double),
+ fs=self.sampling_rate,
+ f0_floor=self.f0_min,
+ f0_ceil=self.f0_max,
+ frame_period=1000 * self.hop_length / self.sampling_rate,
+ )
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+ for index, pitch in enumerate(f0):
+ f0[index] = round(pitch, 1)
+ return self.interpolate_f0(self.resize_f0(f0, p_len))
diff --git a/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/F0Predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..f56e49e7f0e6eab3babf0711cae2933371b9f9cc
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/F0Predictor.py
@@ -0,0 +1,16 @@
+class F0Predictor(object):
+ def compute_f0(self, wav, p_len):
+ """
+ input: wav:[signal_length]
+ p_len:int
+ output: f0:[signal_length//hop_length]
+ """
+ pass
+
+ def compute_f0_uv(self, wav, p_len):
+ """
+ input: wav:[signal_length]
+ p_len:int
+ output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
+ """
+ pass
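+
+
+# The concrete predictors (DioF0Predictor, HarvestF0Predictor, PMF0Predictor)
+# implement this interface; a hypothetical call looks like:
+#     >>> predictor = DioF0Predictor(hop_length=512, sampling_rate=16000)
+#     >>> f0 = predictor.compute_f0(wav)  # wav: 1-D float array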
diff --git a/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..b412ba2814e114ca7bb00b6fd6ef217f63d788a3
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
@@ -0,0 +1,86 @@
+from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+import pyworld
+import numpy as np
+
+
+class HarvestF0Predictor(F0Predictor):
+ def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+ self.hop_length = hop_length
+ self.f0_min = f0_min
+ self.f0_max = f0_max
+ self.sampling_rate = sampling_rate
+
+ def interpolate_f0(self, f0):
+ """
+        Interpolate the F0 contour over unvoiced frames
+ """
+
+ data = np.reshape(f0, (f0.size, 1))
+
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+ vuv_vector[data > 0.0] = 1.0
+ vuv_vector[data <= 0.0] = 0.0
+
+ ip_data = data
+
+ frame_number = data.size
+ last_value = 0.0
+ for i in range(frame_number):
+ if data[i] <= 0.0:
+ j = i + 1
+ for j in range(i + 1, frame_number):
+ if data[j] > 0.0:
+ break
+ if j < frame_number - 1:
+ if last_value > 0.0:
+ step = (data[j] - data[i - 1]) / float(j - i)
+ for k in range(i, j):
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
+ else:
+ for k in range(i, j):
+ ip_data[k] = data[j]
+ else:
+ for k in range(i, frame_number):
+ ip_data[k] = last_value
+ else:
+                ip_data[i] = data[i]  # possibly an unnecessary copy here
+ last_value = data[i]
+
+ return ip_data[:, 0], vuv_vector[:, 0]
+
+ def resize_f0(self, x, target_len):
+ source = np.array(x)
+ source[source < 0.001] = np.nan
+ target = np.interp(
+ np.arange(0, len(source) * target_len, len(source)) / target_len,
+ np.arange(0, len(source)),
+ source,
+ )
+ res = np.nan_to_num(target)
+ return res
+
+ def compute_f0(self, wav, p_len=None):
+ if p_len is None:
+ p_len = wav.shape[0] // self.hop_length
+ f0, t = pyworld.harvest(
+ wav.astype(np.double),
+            fs=self.sampling_rate,
+ f0_ceil=self.f0_max,
+ f0_floor=self.f0_min,
+ frame_period=1000 * self.hop_length / self.sampling_rate,
+ )
+        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+ return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
+
+ def compute_f0_uv(self, wav, p_len=None):
+ if p_len is None:
+ p_len = wav.shape[0] // self.hop_length
+ f0, t = pyworld.harvest(
+ wav.astype(np.double),
+ fs=self.sampling_rate,
+ f0_floor=self.f0_min,
+ f0_ceil=self.f0_max,
+ frame_period=1000 * self.hop_length / self.sampling_rate,
+ )
+ f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
+ return self.interpolate_f0(self.resize_f0(f0, p_len))
diff --git a/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2c592527a5966e6f8e79e8c52dc5b414246dcc6
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
@@ -0,0 +1,97 @@
+from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
+import parselmouth
+import numpy as np
+
+
+class PMF0Predictor(F0Predictor):
+ def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
+ self.hop_length = hop_length
+ self.f0_min = f0_min
+ self.f0_max = f0_max
+ self.sampling_rate = sampling_rate
+
+ def interpolate_f0(self, f0):
+ """
+        Interpolate the F0 contour over unvoiced frames
+ """
+
+ data = np.reshape(f0, (f0.size, 1))
+
+ vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
+ vuv_vector[data > 0.0] = 1.0
+ vuv_vector[data <= 0.0] = 0.0
+
+ ip_data = data
+
+ frame_number = data.size
+ last_value = 0.0
+ for i in range(frame_number):
+ if data[i] <= 0.0:
+ j = i + 1
+ for j in range(i + 1, frame_number):
+ if data[j] > 0.0:
+ break
+ if j < frame_number - 1:
+ if last_value > 0.0:
+ step = (data[j] - data[i - 1]) / float(j - i)
+ for k in range(i, j):
+ ip_data[k] = data[i - 1] + step * (k - i + 1)
+ else:
+ for k in range(i, j):
+ ip_data[k] = data[j]
+ else:
+ for k in range(i, frame_number):
+ ip_data[k] = last_value
+ else:
+                ip_data[i] = data[i]  # possibly an unnecessary copy here
+ last_value = data[i]
+
+ return ip_data[:, 0], vuv_vector[:, 0]
+
+ def compute_f0(self, wav, p_len=None):
+ x = wav
+ if p_len is None:
+ p_len = x.shape[0] // self.hop_length
+ else:
+ assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+ time_step = self.hop_length / self.sampling_rate * 1000
+ f0 = (
+ parselmouth.Sound(x, self.sampling_rate)
+ .to_pitch_ac(
+ time_step=time_step / 1000,
+ voicing_threshold=0.6,
+ pitch_floor=self.f0_min,
+ pitch_ceiling=self.f0_max,
+ )
+ .selected_array["frequency"]
+ )
+
+ pad_size = (p_len - len(f0) + 1) // 2
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+ f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+ f0, uv = self.interpolate_f0(f0)
+ return f0
+
+ def compute_f0_uv(self, wav, p_len=None):
+ x = wav
+ if p_len is None:
+ p_len = x.shape[0] // self.hop_length
+ else:
+ assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
+ time_step = self.hop_length / self.sampling_rate * 1000
+ f0 = (
+ parselmouth.Sound(x, self.sampling_rate)
+ .to_pitch_ac(
+ time_step=time_step / 1000,
+ voicing_threshold=0.6,
+ pitch_floor=self.f0_min,
+ pitch_ceiling=self.f0_max,
+ )
+ .selected_array["frequency"]
+ )
+
+ pad_size = (p_len - len(f0) + 1) // 2
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+ f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
+ f0, uv = self.interpolate_f0(f0)
+ return f0, uv
diff --git a/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/__init__.py b/Waifu-Anime-RCV/lib/infer_pack/modules/F0Predictor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Waifu-Anime-RCV/lib/infer_pack/onnx_inference.py b/Waifu-Anime-RCV/lib/infer_pack/onnx_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..6517853be49e61c427cf7cd9b5ed203f6d5f367e
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/onnx_inference.py
@@ -0,0 +1,145 @@
+import onnxruntime
+import librosa
+import numpy as np
+import soundfile
+
+
+class ContentVec:
+ def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
+ print("load model(s) from {}".format(vec_path))
+ if device == "cpu" or device is None:
+ providers = ["CPUExecutionProvider"]
+ elif device == "cuda":
+ providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+ elif device == "dml":
+ providers = ["DmlExecutionProvider"]
+ else:
+            raise RuntimeError("Unsupported device")
+ self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
+
+ def __call__(self, wav):
+ return self.forward(wav)
+
+ def forward(self, wav):
+ feats = wav
+        if feats.ndim == 2:  # stereo: average the two channels down to mono
+ feats = feats.mean(-1)
+ assert feats.ndim == 1, feats.ndim
+ feats = np.expand_dims(np.expand_dims(feats, 0), 0)
+ onnx_input = {self.model.get_inputs()[0].name: feats}
+ logits = self.model.run(None, onnx_input)[0]
+ return logits.transpose(0, 2, 1)
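+
+# A minimal usage sketch (illustrative comment; one second of silence):
+#     >>> vec = ContentVec("pretrained/vec-768-layer-12.onnx", device="cpu")
+#     >>> feats = vec(np.zeros(16000, dtype=np.float32))  # roughly [1, 768, T]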
+
+
+def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kwargs):
+ if f0_predictor == "pm":
+ from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
+
+ f0_predictor_object = PMF0Predictor(
+ hop_length=hop_length, sampling_rate=sampling_rate
+ )
+ elif f0_predictor == "harvest":
+ from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
+ HarvestF0Predictor,
+ )
+
+ f0_predictor_object = HarvestF0Predictor(
+ hop_length=hop_length, sampling_rate=sampling_rate
+ )
+ elif f0_predictor == "dio":
+ from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
+
+ f0_predictor_object = DioF0Predictor(
+ hop_length=hop_length, sampling_rate=sampling_rate
+ )
+ else:
+        raise Exception("Unknown f0 predictor: " + f0_predictor)
+ return f0_predictor_object
+
+
+class OnnxRVC:
+ def __init__(
+ self,
+ model_path,
+ sr=40000,
+ hop_size=512,
+ vec_path="vec-768-layer-12",
+ device="cpu",
+ ):
+ vec_path = f"pretrained/{vec_path}.onnx"
+ self.vec_model = ContentVec(vec_path, device)
+ if device == "cpu" or device is None:
+ providers = ["CPUExecutionProvider"]
+ elif device == "cuda":
+ providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+ elif device == "dml":
+ providers = ["DmlExecutionProvider"]
+ else:
+            raise RuntimeError("Unsupported device")
+ self.model = onnxruntime.InferenceSession(model_path, providers=providers)
+ self.sampling_rate = sr
+ self.hop_size = hop_size
+
+ def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
+ onnx_input = {
+ self.model.get_inputs()[0].name: hubert,
+ self.model.get_inputs()[1].name: hubert_length,
+ self.model.get_inputs()[2].name: pitch,
+ self.model.get_inputs()[3].name: pitchf,
+ self.model.get_inputs()[4].name: ds,
+ self.model.get_inputs()[5].name: rnd,
+ }
+ return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
+
+ def inference(
+ self,
+ raw_path,
+ sid,
+ f0_method="dio",
+ f0_up_key=0,
+ pad_time=0.5,
+ cr_threshold=0.02,
+ ):
+ f0_min = 50
+ f0_max = 1100
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+ f0_predictor = get_f0_predictor(
+ f0_method,
+ hop_length=self.hop_size,
+ sampling_rate=self.sampling_rate,
+ threshold=cr_threshold,
+ )
+ wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
+ org_length = len(wav)
+ if org_length / sr > 50.0:
+            raise RuntimeError("Input audio is longer than the 50 s limit")
+
+ wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
+
+ hubert = self.vec_model(wav16k)
+ hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
+ hubert_length = hubert.shape[1]
+
+ pitchf = f0_predictor.compute_f0(wav, hubert_length)
+ pitchf = pitchf * 2 ** (f0_up_key / 12)
+ pitch = pitchf.copy()
+ f0_mel = 1127 * np.log(1 + pitch / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+ f0_mel_max - f0_mel_min
+ ) + 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > 255] = 255
+ pitch = np.rint(f0_mel).astype(np.int64)
+
+ pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
+ pitch = pitch.reshape(1, len(pitch))
+ ds = np.array([sid]).astype(np.int64)
+
+ rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
+ hubert_length = np.array([hubert_length]).astype(np.int64)
+
+ out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
+ out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
+ return out_wav[0:org_length]
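+
+
+# A minimal usage sketch (model path, wav path and speaker id are hypothetical):
+#     >>> rvc = OnnxRVC("model.onnx", sr=40000, hop_size=512, device="cpu")
+#     >>> audio = rvc.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)
+#     >>> soundfile.write("output.wav", audio, 40000)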
diff --git a/Waifu-Anime-RCV/lib/infer_pack/transforms.py b/Waifu-Anime-RCV/lib/infer_pack/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..a11f799e023864ff7082c1f49c0cc18351a13b47
--- /dev/null
+++ b/Waifu-Anime-RCV/lib/infer_pack/transforms.py
@@ -0,0 +1,209 @@
+import torch
+from torch.nn import functional as F
+
+import numpy as np
+
+
+DEFAULT_MIN_BIN_WIDTH = 1e-3
+DEFAULT_MIN_BIN_HEIGHT = 1e-3
+DEFAULT_MIN_DERIVATIVE = 1e-3
+
+
+def piecewise_rational_quadratic_transform(
+ inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ tails=None,
+ tail_bound=1.0,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE,
+):
+ if tails is None:
+ spline_fn = rational_quadratic_spline
+ spline_kwargs = {}
+ else:
+ spline_fn = unconstrained_rational_quadratic_spline
+ spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
+
+ outputs, logabsdet = spline_fn(
+ inputs=inputs,
+ unnormalized_widths=unnormalized_widths,
+ unnormalized_heights=unnormalized_heights,
+ unnormalized_derivatives=unnormalized_derivatives,
+ inverse=inverse,
+ min_bin_width=min_bin_width,
+ min_bin_height=min_bin_height,
+ min_derivative=min_derivative,
+ **spline_kwargs
+ )
+ return outputs, logabsdet
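+
+
+# A minimal usage sketch (random spline parameters; illustrative comment only):
+#     >>> x = torch.randn(4, 10)
+#     >>> w, h = torch.randn(4, 10, 8), torch.randn(4, 10, 8)  # 8 bins each
+#     >>> d = torch.randn(4, 10, 7)  # num_bins - 1 interior derivatives
+#     >>> y, logdet = piecewise_rational_quadratic_transform(
+#     ...     x, w, h, d, tails="linear", tail_bound=5.0)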
+
+
+def searchsorted(bin_locations, inputs, eps=1e-6):
+ bin_locations[..., -1] += eps
+ return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
+
+
+def unconstrained_rational_quadratic_spline(
+ inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ tails="linear",
+ tail_bound=1.0,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE,
+):
+ inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
+ outside_interval_mask = ~inside_interval_mask
+
+ outputs = torch.zeros_like(inputs)
+ logabsdet = torch.zeros_like(inputs)
+
+ if tails == "linear":
+ unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
+ constant = np.log(np.exp(1 - min_derivative) - 1)
+ unnormalized_derivatives[..., 0] = constant
+ unnormalized_derivatives[..., -1] = constant
+
+ outputs[outside_interval_mask] = inputs[outside_interval_mask]
+ logabsdet[outside_interval_mask] = 0
+ else:
+ raise RuntimeError("{} tails are not implemented.".format(tails))
+
+ (
+ outputs[inside_interval_mask],
+ logabsdet[inside_interval_mask],
+ ) = rational_quadratic_spline(
+ inputs=inputs[inside_interval_mask],
+ unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
+ unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
+ unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
+ inverse=inverse,
+ left=-tail_bound,
+ right=tail_bound,
+ bottom=-tail_bound,
+ top=tail_bound,
+ min_bin_width=min_bin_width,
+ min_bin_height=min_bin_height,
+ min_derivative=min_derivative,
+ )
+
+ return outputs, logabsdet
+
+
+def rational_quadratic_spline(
+ inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ left=0.0,
+ right=1.0,
+ bottom=0.0,
+ top=1.0,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE,
+):
+ if torch.min(inputs) < left or torch.max(inputs) > right:
+ raise ValueError("Input to a transform is not within its domain")
+
+ num_bins = unnormalized_widths.shape[-1]
+
+ if min_bin_width * num_bins > 1.0:
+ raise ValueError("Minimal bin width too large for the number of bins")
+ if min_bin_height * num_bins > 1.0:
+ raise ValueError("Minimal bin height too large for the number of bins")
+
+ widths = F.softmax(unnormalized_widths, dim=-1)
+ widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
+ cumwidths = torch.cumsum(widths, dim=-1)
+ cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
+ cumwidths = (right - left) * cumwidths + left
+ cumwidths[..., 0] = left
+ cumwidths[..., -1] = right
+ widths = cumwidths[..., 1:] - cumwidths[..., :-1]
+
+ derivatives = min_derivative + F.softplus(unnormalized_derivatives)
+
+ heights = F.softmax(unnormalized_heights, dim=-1)
+ heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
+ cumheights = torch.cumsum(heights, dim=-1)
+ cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
+ cumheights = (top - bottom) * cumheights + bottom
+ cumheights[..., 0] = bottom
+ cumheights[..., -1] = top
+ heights = cumheights[..., 1:] - cumheights[..., :-1]
+
+ if inverse:
+ bin_idx = searchsorted(cumheights, inputs)[..., None]
+ else:
+ bin_idx = searchsorted(cumwidths, inputs)[..., None]
+
+ input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
+ input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
+
+ input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
+ delta = heights / widths
+ input_delta = delta.gather(-1, bin_idx)[..., 0]
+
+ input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
+ input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
+
+ input_heights = heights.gather(-1, bin_idx)[..., 0]
+
+ if inverse:
+ a = (inputs - input_cumheights) * (
+ input_derivatives + input_derivatives_plus_one - 2 * input_delta
+ ) + input_heights * (input_delta - input_derivatives)
+ b = input_heights * input_derivatives - (inputs - input_cumheights) * (
+ input_derivatives + input_derivatives_plus_one - 2 * input_delta
+ )
+ c = -input_delta * (inputs - input_cumheights)
+
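+ # Inverting the spline reduces to a quadratic a*theta^2 + b*theta + c = 0 in
+ # the bin-relative coordinate theta; the root below uses the numerically
+ # stable form 2c / (-b - sqrt(b^2 - 4ac)).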
+ discriminant = b.pow(2) - 4 * a * c
+ assert (discriminant >= 0).all()
+
+ root = (2 * c) / (-b - torch.sqrt(discriminant))
+ outputs = root * input_bin_widths + input_cumwidths
+
+ theta_one_minus_theta = root * (1 - root)
+ denominator = input_delta + (
+ (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+ * theta_one_minus_theta
+ )
+ derivative_numerator = input_delta.pow(2) * (
+ input_derivatives_plus_one * root.pow(2)
+ + 2 * input_delta * theta_one_minus_theta
+ + input_derivatives * (1 - root).pow(2)
+ )
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+ return outputs, -logabsdet
+ else:
+ theta = (inputs - input_cumwidths) / input_bin_widths
+ theta_one_minus_theta = theta * (1 - theta)
+
+ numerator = input_heights * (
+ input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
+ )
+ denominator = input_delta + (
+ (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+ * theta_one_minus_theta
+ )
+ outputs = input_cumheights + numerator / denominator
+
+ derivative_numerator = input_delta.pow(2) * (
+ input_derivatives_plus_one * theta.pow(2)
+ + 2 * input_delta * theta_one_minus_theta
+ + input_derivatives * (1 - theta).pow(2)
+ )
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+ return outputs, logabsdet
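+
+
+# Minimal usage sketch (illustrative only; shapes and num_bins are assumptions,
+# not part of this module). With num_bins bins, the derivatives take
+# num_bins - 1 values and are padded internally when tails="linear":
+# x = torch.randn(4, 10)
+# uw = torch.randn(4, 10, 8) # unnormalized widths (num_bins = 8)
+# uh = torch.randn(4, 10, 8) # unnormalized heights
+# ud = torch.randn(4, 10, 7) # unnormalized derivatives (num_bins - 1)
+# y, logdet = piecewise_rational_quadratic_transform(x, uw, uh, ud, tails="linear")
+# x2, _ = piecewise_rational_quadratic_transform(y, uw, uh, ud, inverse=True, tails="linear")
+# assert torch.allclose(x, x2, atol=1e-4) # forward/inverse round trip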
diff --git a/Waifu-Anime-RCV/requirements.txt b/Waifu-Anime-RCV/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ec03a8f43e8d191a6beebef854e1fcda7cbbed11
--- /dev/null
+++ b/Waifu-Anime-RCV/requirements.txt
@@ -0,0 +1,22 @@
+wheel
+setuptools
+ffmpeg
+torch
+numba==0.56.4
+numpy==1.23.5
+scipy==1.9.3
+librosa==0.9.1
+fairseq==0.12.2
+faiss-cpu==1.7.3
+gradio==3.50.2
+pyworld>=0.3.2
+soundfile>=0.12.1
+praat-parselmouth>=0.4.2
+httpx
+tensorboard
+tensorboardX
+torchcrepe
+onnxruntime
+demucs
+edge-tts
+yt_dlp
\ No newline at end of file
diff --git a/Waifu-Anime-RCV/rmvpe.pt b/Waifu-Anime-RCV/rmvpe.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bae4def4f226bb41cc24f800dc463cdf08940e6b
--- /dev/null
+++ b/Waifu-Anime-RCV/rmvpe.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5ed4719f59085d1affc5d81354c70828c740584f2d24e782523345a6a278962
+size 181189687
diff --git a/Waifu-Anime-RCV/rmvpe.py b/Waifu-Anime-RCV/rmvpe.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ad346141340e03bdbaa20121e1ed435bb3da57a
--- /dev/null
+++ b/Waifu-Anime-RCV/rmvpe.py
@@ -0,0 +1,432 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from librosa.filters import mel
+# Mel input bins / pitch output classes (needed by the no-GRU head below).
+N_MELS = 128
+N_CLASS = 360
+
+
+class BiGRU(nn.Module):
+ def __init__(self, input_features, hidden_features, num_layers):
+ super(BiGRU, self).__init__()
+ self.gru = nn.GRU(
+ input_features,
+ hidden_features,
+ num_layers=num_layers,
+ batch_first=True,
+ bidirectional=True,
+ )
+
+ def forward(self, x):
+ return self.gru(x)[0]
+
+
+class ConvBlockRes(nn.Module):
+ def __init__(self, in_channels, out_channels, momentum=0.01):
+ super(ConvBlockRes, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=(3, 3),
+ stride=(1, 1),
+ padding=(1, 1),
+ bias=False,
+ ),
+ nn.BatchNorm2d(out_channels, momentum=momentum),
+ nn.ReLU(),
+ nn.Conv2d(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=(3, 3),
+ stride=(1, 1),
+ padding=(1, 1),
+ bias=False,
+ ),
+ nn.BatchNorm2d(out_channels, momentum=momentum),
+ nn.ReLU(),
+ )
+ if in_channels != out_channels:
+ self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
+ self.is_shortcut = True
+ else:
+ self.is_shortcut = False
+
+ def forward(self, x):
+ if self.is_shortcut:
+ return self.conv(x) + self.shortcut(x)
+ else:
+ return self.conv(x) + x
+
+
+class Encoder(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ in_size,
+ n_encoders,
+ kernel_size,
+ n_blocks,
+ out_channels=16,
+ momentum=0.01,
+ ):
+ super(Encoder, self).__init__()
+ self.n_encoders = n_encoders
+ self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
+ self.layers = nn.ModuleList()
+ self.latent_channels = []
+ for i in range(self.n_encoders):
+ self.layers.append(
+ ResEncoderBlock(
+ in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
+ )
+ )
+ self.latent_channels.append([out_channels, in_size])
+ in_channels = out_channels
+ out_channels *= 2
+ in_size //= 2
+ self.out_size = in_size
+ self.out_channel = out_channels
+
+ def forward(self, x):
+ concat_tensors = []
+ x = self.bn(x)
+ for i in range(self.n_encoders):
+ _, x = self.layers[i](x)
+ concat_tensors.append(_)
+ return x, concat_tensors
+
+
+class ResEncoderBlock(nn.Module):
+ def __init__(
+ self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
+ ):
+ super(ResEncoderBlock, self).__init__()
+ self.n_blocks = n_blocks
+ self.conv = nn.ModuleList()
+ self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
+ for i in range(n_blocks - 1):
+ self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
+ self.kernel_size = kernel_size
+ if self.kernel_size is not None:
+ self.pool = nn.AvgPool2d(kernel_size=kernel_size)
+
+ def forward(self, x):
+ for i in range(self.n_blocks):
+ x = self.conv[i](x)
+ if self.kernel_size is not None:
+ return x, self.pool(x)
+ else:
+ return x
+
+
+class Intermediate(nn.Module):
+ def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
+ super(Intermediate, self).__init__()
+ self.n_inters = n_inters
+ self.layers = nn.ModuleList()
+ self.layers.append(
+ ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
+ )
+ for i in range(self.n_inters - 1):
+ self.layers.append(
+ ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
+ )
+
+ def forward(self, x):
+ for i in range(self.n_inters):
+ x = self.layers[i](x)
+ return x
+
+
+class ResDecoderBlock(nn.Module):
+ def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
+ super(ResDecoderBlock, self).__init__()
+ out_padding = (0, 1) if stride == (1, 2) else (1, 1)
+ self.n_blocks = n_blocks
+ self.conv1 = nn.Sequential(
+ nn.ConvTranspose2d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=(3, 3),
+ stride=stride,
+ padding=(1, 1),
+ output_padding=out_padding,
+ bias=False,
+ ),
+ nn.BatchNorm2d(out_channels, momentum=momentum),
+ nn.ReLU(),
+ )
+ self.conv2 = nn.ModuleList()
+ self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
+ for i in range(n_blocks - 1):
+ self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
+
+ def forward(self, x, concat_tensor):
+ x = self.conv1(x)
+ x = torch.cat((x, concat_tensor), dim=1)
+ for i in range(self.n_blocks):
+ x = self.conv2[i](x)
+ return x
+
+
+class Decoder(nn.Module):
+ def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
+ super(Decoder, self).__init__()
+ self.layers = nn.ModuleList()
+ self.n_decoders = n_decoders
+ for i in range(self.n_decoders):
+ out_channels = in_channels // 2
+ self.layers.append(
+ ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
+ )
+ in_channels = out_channels
+
+ def forward(self, x, concat_tensors):
+ for i in range(self.n_decoders):
+ x = self.layers[i](x, concat_tensors[-1 - i])
+ return x
+
+
+class DeepUnet(nn.Module):
+ def __init__(
+ self,
+ kernel_size,
+ n_blocks,
+ en_de_layers=5,
+ inter_layers=4,
+ in_channels=1,
+ en_out_channels=16,
+ ):
+ super(DeepUnet, self).__init__()
+ self.encoder = Encoder(
+ in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
+ )
+ self.intermediate = Intermediate(
+ self.encoder.out_channel // 2,
+ self.encoder.out_channel,
+ inter_layers,
+ n_blocks,
+ )
+ self.decoder = Decoder(
+ self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
+ )
+
+ def forward(self, x):
+ x, concat_tensors = self.encoder(x)
+ x = self.intermediate(x)
+ x = self.decoder(x, concat_tensors)
+ return x
+
+
+class E2E(nn.Module):
+ def __init__(
+ self,
+ n_blocks,
+ n_gru,
+ kernel_size,
+ en_de_layers=5,
+ inter_layers=4,
+ in_channels=1,
+ en_out_channels=16,
+ ):
+ super(E2E, self).__init__()
+ self.unet = DeepUnet(
+ kernel_size,
+ n_blocks,
+ en_de_layers,
+ inter_layers,
+ in_channels,
+ en_out_channels,
+ )
+ self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
+ if n_gru:
+ self.fc = nn.Sequential(
+ BiGRU(3 * N_MELS, 256, n_gru),
+ nn.Linear(512, N_CLASS),
+ nn.Dropout(0.25),
+ nn.Sigmoid(),
+ )
+ else:
+ self.fc = nn.Sequential(
+ nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid()
+ )
+
+ def forward(self, mel):
+ mel = mel.transpose(-1, -2).unsqueeze(1)
+ x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
+ x = self.fc(x)
+ return x
+
+
+class MelSpectrogram(torch.nn.Module):
+ def __init__(
+ self,
+ is_half,
+ n_mel_channels,
+ sampling_rate,
+ win_length,
+ hop_length,
+ n_fft=None,
+ mel_fmin=0,
+ mel_fmax=None,
+ clamp=1e-5,
+ ):
+ super().__init__()
+ n_fft = win_length if n_fft is None else n_fft
+ self.hann_window = {}
+ mel_basis = mel(
+ sr=sampling_rate,
+ n_fft=n_fft,
+ n_mels=n_mel_channels,
+ fmin=mel_fmin,
+ fmax=mel_fmax,
+ htk=True,
+ )
+ mel_basis = torch.from_numpy(mel_basis).float()
+ self.register_buffer("mel_basis", mel_basis)
+ self.n_fft = win_length if n_fft is None else n_fft
+ self.hop_length = hop_length
+ self.win_length = win_length
+ self.sampling_rate = sampling_rate
+ self.n_mel_channels = n_mel_channels
+ self.clamp = clamp
+ self.is_half = is_half
+
+ def forward(self, audio, keyshift=0, speed=1, center=True):
+ factor = 2 ** (keyshift / 12)
+ n_fft_new = int(np.round(self.n_fft * factor))
+ win_length_new = int(np.round(self.win_length * factor))
+ hop_length_new = int(np.round(self.hop_length * speed))
+ keyshift_key = str(keyshift) + "_" + str(audio.device)
+ if keyshift_key not in self.hann_window:
+ self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
+ audio.device
+ )
+ fft = torch.stft(
+ audio,
+ n_fft=n_fft_new,
+ hop_length=hop_length_new,
+ win_length=win_length_new,
+ window=self.hann_window[keyshift_key],
+ center=center,
+ return_complex=True,
+ )
+ magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
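+ # A nonzero keyshift used a scaled n_fft, so crop/pad back to the nominal
+ # bin count and rescale magnitudes by the window-length ratio.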
+ if keyshift != 0:
+ size = self.n_fft // 2 + 1
+ resize = magnitude.size(1)
+ if resize < size:
+ magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
+ magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
+ mel_output = torch.matmul(self.mel_basis, magnitude)
+ if self.is_half:
+ mel_output = mel_output.half()
+ log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
+ return log_mel_spec
+
+
+class RMVPE:
+ def __init__(self, model_path, is_half, device=None):
+ self.resample_kernel = {}
+ model = E2E(4, 1, (2, 2))
+ ckpt = torch.load(model_path, map_location="cpu")
+ model.load_state_dict(ckpt)
+ model.eval()
+ if is_half:
+ model = model.half()
+ self.model = model
+ self.resample_kernel = {}
+ self.is_half = is_half
+ if device is None:
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ self.device = device
+ self.mel_extractor = MelSpectrogram(
+ is_half, 128, 16000, 1024, 160, None, 30, 8000
+ ).to(device)
+ self.model = self.model.to(device)
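+ # 360 pitch bins spaced 20 cents apart, in cents above 10 Hz (decode maps
+ # cents -> Hz via 10 * 2**(cents / 1200)); padded by 4 on each side for the
+ # 9-bin local average in to_local_average_cents.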
+ cents_mapping = 20 * np.arange(360) + 1997.3794084376191
+ self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
+
+ def mel2hidden(self, mel):
+ with torch.no_grad():
+ n_frames = mel.shape[-1]
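+ # Pad the frame axis to a multiple of 32 so the five 2x pooling stages in
+ # DeepUnet divide evenly; the extra frames are cropped off on return.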
+ mel = F.pad(
+ mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
+ )
+ hidden = self.model(mel)
+ return hidden[:, :n_frames]
+
+ def decode(self, hidden, thred=0.03):
+ cents_pred = self.to_local_average_cents(hidden, thred=thred)
+ f0 = 10 * (2 ** (cents_pred / 1200))
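+ # cents_pred == 0 (no salience above threshold) maps to exactly 10 Hz; zero it as unvoiced.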
+ f0[f0 == 10] = 0
+ # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
+ return f0
+
+ def infer_from_audio(self, audio, thred=0.03):
+ audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
+ # torch.cuda.synchronize()
+ # t0=ttime()
+ mel = self.mel_extractor(audio, center=True)
+ # torch.cuda.synchronize()
+ # t1=ttime()
+ hidden = self.mel2hidden(mel)
+ # torch.cuda.synchronize()
+ # t2=ttime()
+ hidden = hidden.squeeze(0).cpu().numpy()
+ if self.is_half:
+ hidden = hidden.astype("float32")
+ f0 = self.decode(hidden, thred=thred)
+ # torch.cuda.synchronize()
+ # t3=ttime()
+ # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
+ return f0
+
+ def to_local_average_cents(self, salience, thred=0.05):
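+ # Refine the argmax to sub-bin precision: weighted average of cents over the
+ # 9 salience bins around each frame's peak. Frames whose peak salience is
+ # <= thred are returned as 0 (unvoiced).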
+ # t0 = ttime()
+ center = np.argmax(salience, axis=1) # (n_frames,) argmax bin per frame
+ salience = np.pad(salience, ((0, 0), (4, 4))) # (n_frames, 368)
+ # t1 = ttime()
+ center += 4
+ todo_salience = []
+ todo_cents_mapping = []
+ starts = center - 4
+ ends = center + 5
+ for idx in range(salience.shape[0]):
+ todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
+ todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
+ # t2 = ttime()
+ todo_salience = np.array(todo_salience) # (n_frames, 9)
+ todo_cents_mapping = np.array(todo_cents_mapping) # (n_frames, 9)
+ product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
+ weight_sum = np.sum(todo_salience, 1) # (n_frames,)
+ divided = product_sum / weight_sum # (n_frames,)
+ # t3 = ttime()
+ maxx = np.max(salience, axis=1) # (n_frames,)
+ divided[maxx <= thred] = 0
+ # t4 = ttime()
+ # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
+ return divided
+
+
+# if __name__ == '__main__':
+# audio, sampling_rate = sf.read("卢本伟语录~1.wav")
+# if len(audio.shape) > 1:
+# audio = librosa.to_mono(audio.transpose(1, 0))
+# audio_bak = audio.copy()
+# if sampling_rate != 16000:
+# audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+# model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt"
+# thred = 0.03 # 0.01
+# device = 'cuda' if torch.cuda.is_available() else 'cpu'
+# rmvpe = RMVPE(model_path,is_half=False, device=device)
+# t0=ttime()
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# f0 = rmvpe.infer_from_audio(audio, thred=thred)
+# t1=ttime()
+# print(f0.shape,t1-t0)
diff --git a/Waifu-Anime-RCV/soyo-nagasaki.gif b/Waifu-Anime-RCV/soyo-nagasaki.gif
new file mode 100644
index 0000000000000000000000000000000000000000..6db9fd6f14ee20dfe7b8f07f216719af98750ac2
--- /dev/null
+++ b/Waifu-Anime-RCV/soyo-nagasaki.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:140cd29047b06831e27458f9a392c33c81cac166e922eb86f1dbc7665aedf5b6
+size 1957045
diff --git a/Waifu-Anime-RCV/vc_infer_pipeline.py b/Waifu-Anime-RCV/vc_infer_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..82c15f59a8072e1b317fa1d750ccc1b814a6989d
--- /dev/null
+++ b/Waifu-Anime-RCV/vc_infer_pipeline.py
@@ -0,0 +1,443 @@
+import os
+import sys
+import traceback
+from functools import lru_cache
+from time import time as ttime
+import faiss
+import librosa
+import numpy as np
+import parselmouth
+import pyworld
+import torch
+import torch.nn.functional as F
+import torchcrepe
+from scipy import signal
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+
+bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
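+# 5th-order Butterworth high-pass at 48 Hz (fs = 16 kHz) to strip DC offset and rumble.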
+
+input_audio_path2wav = {}
+
+
+@lru_cache
+def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
+ audio = input_audio_path2wav[input_audio_path]
+ f0, t = pyworld.harvest(
+ audio,
+ fs=fs,
+ f0_ceil=f0max,
+ f0_floor=f0min,
+ frame_period=frame_period,
+ )
+ f0 = pyworld.stonemask(audio, f0, t, fs)
+ return f0
+
+
+def change_rms(data1, sr1, data2, sr2, rate): # 1 is the input audio, 2 is the output audio; rate is the weight given to 2
+ # print(data1.max(),data2.max())
+ rms1 = librosa.feature.rms(
+ y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
+ ) # one RMS point every half second
+ rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
+ rms1 = torch.from_numpy(rms1)
+ rms1 = F.interpolate(
+ rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
+ ).squeeze()
+ rms2 = torch.from_numpy(rms2)
+ rms2 = F.interpolate(
+ rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
+ ).squeeze()
+ rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
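+ # Envelope blend: rate = 1 keeps the output's own loudness contour, while
+ # rate -> 0 re-imposes the input's contour onto the output.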
+ data2 *= (
+ torch.pow(rms1, torch.tensor(1 - rate))
+ * torch.pow(rms2, torch.tensor(rate - 1))
+ ).numpy()
+ return data2
+
+
+class VC(object):
+ def __init__(self, tgt_sr, config):
+ self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
+ config.x_pad,
+ config.x_query,
+ config.x_center,
+ config.x_max,
+ config.is_half,
+ )
+ self.sr = 16000 # HuBERT input sampling rate
+ self.window = 160 # samples per f0 frame
+ self.t_pad = self.sr * self.x_pad # padding before and after each chunk
+ self.t_pad_tgt = tgt_sr * self.x_pad
+ self.t_pad2 = self.t_pad * 2
+ self.t_query = self.sr * self.x_query # search window around each cut point
+ self.t_center = self.sr * self.x_center # spacing between candidate cut points
+ self.t_max = self.sr * self.x_max # duration threshold below which no cutting is needed
+ self.device = config.device
+
+ def get_f0(
+ self,
+ input_audio_path,
+ x,
+ p_len,
+ f0_up_key,
+ f0_method,
+ filter_radius,
+ inp_f0=None,
+ ):
+ global input_audio_path2wav
+ time_step = self.window / self.sr * 1000
+ f0_min = 50
+ f0_max = 1100
+ f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+ f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+ if f0_method == "pm":
+ f0 = (
+ parselmouth.Sound(x, self.sr)
+ .to_pitch_ac(
+ time_step=time_step / 1000,
+ voicing_threshold=0.6,
+ pitch_floor=f0_min,
+ pitch_ceiling=f0_max,
+ )
+ .selected_array["frequency"]
+ )
+ pad_size = (p_len - len(f0) + 1) // 2
+ if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+ f0 = np.pad(
+ f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
+ )
+ elif f0_method == "harvest":
+ input_audio_path2wav[input_audio_path] = x.astype(np.double)
+ f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
+ if filter_radius > 2:
+ f0 = signal.medfilt(f0, 3)
+ elif f0_method == "crepe":
+ model = "full"
+ # Pick a batch size that doesn't cause memory errors on your gpu
+ batch_size = 512
+ # Compute pitch using first gpu
+ audio = torch.tensor(np.copy(x))[None].float()
+ f0, pd = torchcrepe.predict(
+ audio,
+ self.sr,
+ self.window,
+ f0_min,
+ f0_max,
+ model,
+ batch_size=batch_size,
+ device=self.device,
+ return_periodicity=True,
+ )
+ pd = torchcrepe.filter.median(pd, 3)
+ f0 = torchcrepe.filter.mean(f0, 3)
+ f0[pd < 0.1] = 0
+ f0 = f0[0].cpu().numpy()
+ elif f0_method == "rmvpe":
+ if not hasattr(self, "model_rmvpe"):
+ from rmvpe import RMVPE
+
+ print("loading rmvpe model")
+ self.model_rmvpe = RMVPE(
+ "rmvpe.pt", is_half=self.is_half, device=self.device
+ )
+ f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
+ f0 *= pow(2, f0_up_key / 12)
+ # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
+ tf0 = self.sr // self.window # f0 points per second
+ if inp_f0 is not None:
+ delta_t = np.round(
+ (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
+ ).astype("int16")
+ replace_f0 = np.interp(
+ list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
+ )
+ shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
+ f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
+ :shape
+ ]
+ # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
+ f0bak = f0.copy()
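+ # Quantize f0 onto 255 coarse mel-scale bins (1 = unvoiced) for the pitch embedding.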
+ f0_mel = 1127 * np.log(1 + f0 / 700)
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+ f0_mel_max - f0_mel_min
+ ) + 1
+ f0_mel[f0_mel <= 1] = 1
+ f0_mel[f0_mel > 255] = 255
+ f0_coarse = np.rint(f0_mel).astype(np.int64)
+ return f0_coarse, f0bak # 1-0
+
+ def vc(
+ self,
+ model,
+ net_g,
+ sid,
+ audio0,
+ pitch,
+ pitchf,
+ times,
+ index,
+ big_npy,
+ index_rate,
+ version,
+ protect,
+ ): # ,file_index,file_big_npy
+ feats = torch.from_numpy(audio0)
+ if self.is_half:
+ feats = feats.half()
+ else:
+ feats = feats.float()
+ if feats.dim() == 2: # double channels
+ feats = feats.mean(-1)
+ assert feats.dim() == 1, feats.dim()
+ feats = feats.view(1, -1)
+ padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
+
+ inputs = {
+ "source": feats.to(self.device),
+ "padding_mask": padding_mask,
+ "output_layer": 9 if version == "v1" else 12,
+ }
+ t0 = ttime()
+ with torch.no_grad():
+ logits = model.extract_features(**inputs)
+ feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
+ if protect < 0.5 and pitch is not None and pitchf is not None:
+ feats0 = feats.clone()
+ if (
+ index is not None
+ and big_npy is not None
+ and index_rate != 0
+ ):
+ npy = feats[0].cpu().numpy()
+ if self.is_half:
+ npy = npy.astype("float32")
+
+ # _, I = index.search(npy, 1)
+ # npy = big_npy[I.squeeze()]
+
+ score, ix = index.search(npy, k=8)
+ weight = np.square(1 / score)
+ weight /= weight.sum(axis=1, keepdims=True)
+ npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+
+ if self.is_half:
+ npy = npy.astype("float16")
+ feats = (
+ torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
+ + (1 - index_rate) * feats
+ )
+
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+ if protect < 0.5 and pitch is not None and pitchf is not None:
+ feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
+ 0, 2, 1
+ )
+ t1 = ttime()
+ p_len = audio0.shape[0] // self.window
+ if feats.shape[1] < p_len:
+ p_len = feats.shape[1]
+ if pitch is not None and pitchf is not None:
+ pitch = pitch[:, :p_len]
+ pitchf = pitchf[:, :p_len]
+
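+ # "protect" guards voiceless frames: where f0 == 0, mix back toward the raw
+ # HuBERT features (feats0) with weight 1 - protect; voiced frames keep the
+ # index-blended features.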
+ if protect < 0.5 and pitch is not None and pitchf is not None:
+ pitchff = pitchf.clone()
+ pitchff[pitchf > 0] = 1
+ pitchff[pitchf < 1] = protect
+ pitchff = pitchff.unsqueeze(-1)
+ feats = feats * pitchff + feats0 * (1 - pitchff)
+ feats = feats.to(feats0.dtype)
+ p_len = torch.tensor([p_len], device=self.device).long()
+ with torch.no_grad():
+ if pitch is not None and pitchf is not None:
+ audio1 = (
+ (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
+ .data.cpu()
+ .float()
+ .numpy()
+ )
+ else:
+ audio1 = (
+ (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
+ )
+ del feats, p_len, padding_mask
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ t2 = ttime()
+ times[0] += t1 - t0
+ times[2] += t2 - t1
+ return audio1
+
+ def pipeline(
+ self,
+ model,
+ net_g,
+ sid,
+ audio,
+ input_audio_path,
+ times,
+ f0_up_key,
+ f0_method,
+ file_index,
+ # file_big_npy,
+ index_rate,
+ if_f0,
+ filter_radius,
+ tgt_sr,
+ resample_sr,
+ rms_mix_rate,
+ version,
+ protect,
+ f0_file=None,
+ ):
+ if (
+ file_index != ""
+ # and file_big_npy != ""
+ # and os.path.exists(file_big_npy) == True
+ and os.path.exists(file_index)
+ and index_rate != 0
+ ):
+ try:
+ index = faiss.read_index(file_index)
+ # big_npy = np.load(file_big_npy)
+ big_npy = index.reconstruct_n(0, index.ntotal)
+ except Exception:
+ traceback.print_exc()
+ index = big_npy = None
+ else:
+ index = big_npy = None
+ audio = signal.filtfilt(bh, ah, audio)
+ audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
+ opt_ts = []
+ if audio_pad.shape[0] > self.t_max:
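+ # Long inputs are split at low-energy points: audio_sum is a sliding-window
+ # sum of the signal, and each cut lands at the minimum |audio_sum| within
+ # t_query of a regularly spaced center.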
+ audio_sum = np.zeros_like(audio)
+ for i in range(self.window):
+ audio_sum += audio_pad[i : i - self.window]
+ for t in range(self.t_center, audio.shape[0], self.t_center):
+ opt_ts.append(
+ t
+ - self.t_query
+ + np.where(
+ np.abs(audio_sum[t - self.t_query : t + self.t_query])
+ == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
+ )[0][0]
+ )
+ s = 0
+ audio_opt = []
+ t = None
+ t1 = ttime()
+ audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
+ p_len = audio_pad.shape[0] // self.window
+ inp_f0 = None
+ if hasattr(f0_file, "name"):
+ try:
+ with open(f0_file.name, "r") as f:
+ lines = f.read().strip("\n").split("\n")
+ inp_f0 = []
+ for line in lines:
+ inp_f0.append([float(i) for i in line.split(",")])
+ inp_f0 = np.array(inp_f0, dtype="float32")
+ except Exception:
+ traceback.print_exc()
+ sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
+ pitch, pitchf = None, None
+ if if_f0 == 1:
+ pitch, pitchf = self.get_f0(
+ input_audio_path,
+ audio_pad,
+ p_len,
+ f0_up_key,
+ f0_method,
+ filter_radius,
+ inp_f0,
+ )
+ pitch = pitch[:p_len]
+ pitchf = pitchf[:p_len]
+ if self.device == "mps":
+ pitchf = pitchf.astype(np.float32)
+ pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
+ pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
+ t2 = ttime()
+ times[1] += t2 - t1
+ for t in opt_ts:
+ t = t // self.window * self.window
+ if if_f0 == 1:
+ audio_opt.append(
+ self.vc(
+ model,
+ net_g,
+ sid,
+ audio_pad[s : t + self.t_pad2 + self.window],
+ pitch[:, s // self.window : (t + self.t_pad2) // self.window],
+ pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
+ times,
+ index,
+ big_npy,
+ index_rate,
+ version,
+ protect,
+ )[self.t_pad_tgt : -self.t_pad_tgt]
+ )
+ else:
+ audio_opt.append(
+ self.vc(
+ model,
+ net_g,
+ sid,
+ audio_pad[s : t + self.t_pad2 + self.window],
+ None,
+ None,
+ times,
+ index,
+ big_npy,
+ index_rate,
+ version,
+ protect,
+ )[self.t_pad_tgt : -self.t_pad_tgt]
+ )
+ s = t
+ if if_f0 == 1:
+ audio_opt.append(
+ self.vc(
+ model,
+ net_g,
+ sid,
+ audio_pad[t:],
+ pitch[:, t // self.window :] if t is not None else pitch,
+ pitchf[:, t // self.window :] if t is not None else pitchf,
+ times,
+ index,
+ big_npy,
+ index_rate,
+ version,
+ protect,
+ )[self.t_pad_tgt : -self.t_pad_tgt]
+ )
+ else:
+ audio_opt.append(
+ self.vc(
+ model,
+ net_g,
+ sid,
+ audio_pad[t:],
+ None,
+ None,
+ times,
+ index,
+ big_npy,
+ index_rate,
+ version,
+ protect,
+ )[self.t_pad_tgt : -self.t_pad_tgt]
+ )
+ audio_opt = np.concatenate(audio_opt)
+ if rms_mix_rate != 1:
+ audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
+ if resample_sr >= 16000 and tgt_sr != resample_sr:
+ audio_opt = librosa.resample(
+ audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
+ )
+ audio_max = np.abs(audio_opt).max() / 0.99
+ max_int16 = 32768
+ if audio_max > 1:
+ max_int16 /= audio_max
+ audio_opt = (audio_opt * max_int16).astype(np.int16)
+ del pitch, pitchf, sid
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ return audio_opt
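+
+
+# Hedged usage sketch (hypothetical names; not part of this file). pipeline()
+# expects a fairseq HuBERT encoder, a loaded RVC generator net_g, and a config
+# exposing x_pad / x_query / x_center / x_max / is_half / device:
+# vc = VC(tgt_sr=40000, config=config)
+# audio_out = vc.pipeline(
+# hubert_model, net_g, 0, audio_16k, "input.wav", [0, 0, 0],
+# f0_up_key=0, f0_method="rmvpe", file_index="", index_rate=0.0,
+# if_f0=1, filter_radius=3, tgt_sr=40000, resample_sr=0,
+# rms_mix_rate=1.0, version="v2", protect=0.33,
+# )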