ArkanDash committed
Commit 8735d68
1 Parent(s): 06813d6

feat: minor update

.gitignore CHANGED
@@ -9,8 +9,6 @@
 *.user
 *.userosscache
 *.sln.docstates
-load.py
-button.py
 
 # User-specific files (MonoDevelop/Xamarin Studio)
 *.userprefs
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: RVC Inference
+title: RVC Genshin Impact
 emoji: 🎤
 colorFrom: red
 colorTo: purple
app-full.py DELETED
@@ -1,289 +0,0 @@
-import os
-import glob
-import json
-import traceback
-import logging
-import gradio as gr
-import numpy as np
-import librosa
-import torch
-import asyncio
-import edge_tts
-import yt_dlp
-import ffmpeg
-import subprocess
-import sys
-import io
-import wave
-from datetime import datetime
-from fairseq import checkpoint_utils
-from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
-from vc_infer_pipeline import VC
-from config import Config
-config = Config()
-logging.getLogger("numba").setLevel(logging.WARNING)
-
-def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index):
-    def vc_fn(
-        input_audio,
-        upload_audio,
-        upload_mode,
-        f0_up_key,
-        f0_method,
-        index_rate,
-        tts_mode,
-        tts_text,
-        tts_voice
-    ):
-        try:
-            if tts_mode:
-                if tts_text is None or tts_voice is None:
-                    return "You need to enter text and select a voice", None
-                asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
-                audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
-            else:
-                if upload_mode:
-                    if input_audio is None:
-                        return "You need to upload an audio", None
-                    sampling_rate, audio = upload_audio
-                    duration = audio.shape[0] / sampling_rate
-                    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-                    if len(audio.shape) > 1:
-                        audio = librosa.to_mono(audio.transpose(1, 0))
-                    if sampling_rate != 16000:
-                        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-                else:
-                    audio, sr = librosa.load(input_audio, sr=16000, mono=True)
-            times = [0, 0, 0]
-            f0_up_key = int(f0_up_key)
-            audio_opt = vc.pipeline(
-                hubert_model,
-                net_g,
-                0,
-                audio,
-                times,
-                f0_up_key,
-                f0_method,
-                file_index,
-                index_rate,
-                if_f0,
-                f0_file=None,
-            )
-            print(
-                f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
-            )
-            return (tgt_sr, audio_opt)
-        except:
-            info = traceback.format_exc()
-            print(info)
-            return info, (None, None)
-    return vc_fn
-
-def cut_vocal_and_inst(url, audio_provider, split_model):
-    if url != "":
-        if not os.path.exists("dl_audio"):
-            os.mkdir("dl_audio")
-        if audio_provider == "Youtube":
-            ydl_opts = {
-                'format': 'bestaudio/best',
-                'postprocessors': [{
-                    'key': 'FFmpegExtractAudio',
-                    'preferredcodec': 'wav',
-                }],
-                "outtmpl": 'dl_audio/youtube_audio',
-            }
-            with yt_dlp.YoutubeDL(ydl_opts) as ydl:
-                ydl.download([url])
-            audio_path = "dl_audio/youtube_audio.wav"
-        else:
-            # Spotify doesn't work.
-            # Need to find other solution soon.
-            '''
-            command = f"spotdl download {url} --output dl_audio/.wav"
-            result = subprocess.run(command.split(), stdout=subprocess.PIPE)
-            print(result.stdout.decode())
-            audio_path = "dl_audio/spotify_audio.wav"
-            '''
-        if split_model == "htdemucs":
-            command = f"demucs --two-stems=vocals {audio_path} -o output"
-            result = subprocess.run(command.split(), stdout=subprocess.PIPE)
-            print(result.stdout.decode())
-            return "output/htdemucs/youtube_audio/vocals.wav", "output/htdemucs/youtube_audio/no_vocals.wav", audio_path, "output/htdemucs/youtube_audio/vocals.wav"
-        else:
-            command = f"demucs --two-stems=vocals -n mdx_extra_q {audio_path} -o output"
-            result = subprocess.run(command.split(), stdout=subprocess.PIPE)
-            print(result.stdout.decode())
-            return "output/mdx_extra_q/youtube_audio/vocals.wav", "output/mdx_extra_q/youtube_audio/no_vocals.wav", audio_path, "output/mdx_extra_q/youtube_audio/vocals.wav"
-    else:
-        raise gr.Error("URL Required!")
-        return None, None, None, None
-
-def combine_vocal_and_inst(audio_data, audio_volume, split_model):
-    if not os.path.exists("output/result"):
-        os.mkdir("output/result")
-    vocal_path = "output/result/output.wav"
-    output_path = "output/result/combine.mp3"
-    if split_model == "htdemucs":
-        inst_path = "output/htdemucs/youtube_audio/no_vocals.wav"
-    else:
-        inst_path = "output/mdx_extra_q/youtube_audio/no_vocals.wav"
-    with wave.open(vocal_path, "w") as wave_file:
-        wave_file.setnchannels(1)
-        wave_file.setsampwidth(2)
-        wave_file.setframerate(audio_data[0])
-        wave_file.writeframes(audio_data[1].tobytes())
-    command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [1:a]volume={audio_volume}dB[v];[0:a][v]amix=inputs=2:duration=longest -b:a 320k -c:a libmp3lame {output_path}'
-    result = subprocess.run(command.split(), stdout=subprocess.PIPE)
-    print(result.stdout.decode())
-    return output_path
-
-def load_hubert():
-    global hubert_model
-    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
-        ["hubert_base.pt"],
-        suffix="",
-    )
-    hubert_model = models[0]
-    hubert_model = hubert_model.to(config.device)
-    if config.is_half:
-        hubert_model = hubert_model.half()
-    else:
-        hubert_model = hubert_model.float()
-    hubert_model.eval()
-
-def change_to_tts_mode(tts_mode, upload_mode):
-    if tts_mode:
-        return gr.Textbox.update(visible=False), gr.Audio.update(visible=False), gr.Checkbox.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
-    else:
-        if upload_mode:
-            return gr.Textbox.update(visible=False), gr.Audio.update(visible=True), gr.Checkbox.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
-        else:
-            return gr.Textbox.update(visible=True), gr.Audio.update(visible=False), gr.Checkbox.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
-
-def change_to_upload_mode(upload_mode):
-    if upload_mode:
-        return gr.Textbox().update(visible=False), gr.Audio().update(visible=True)
-    else:
-        return gr.Textbox().update(visible=True), gr.Audio().update(visible=False)
-
-if __name__ == '__main__':
-    load_hubert()
-    categories = []
-    tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
-    voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
-    with open("weights/folder_info.json", "r", encoding="utf-8") as f:
-        folder_info = json.load(f)
-    for category_name, category_info in folder_info.items():
-        if not category_info['enable']:
-            continue
-        category_title = category_info['title']
-        category_folder = category_info['folder_path']
-        description = category_info['description']
-        models = []
-        with open(f"weights/{category_folder}/model_info.json", "r", encoding="utf-8") as f:
-            models_info = json.load(f)
-        for model_name, info in models_info.items():
-            if not info['enable']:
-                continue
-            model_title = info['title']
-            model_author = info.get("author", None)
-            model_cover = f"weights/{category_folder}/{model_name}/{info['cover']}"
-            model_index = f"weights/{category_folder}/{model_name}/{info['feature_retrieval_library']}"
-            cpt = torch.load(f"weights/{category_folder}/{model_name}/{model_name}.pth", map_location="cpu")
-            tgt_sr = cpt["config"][-1]
-            cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-            if_f0 = cpt.get("f0", 1)
-            if if_f0 == 1:
-                net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
-            else:
-                net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-            del net_g.enc_q
-            print(net_g.load_state_dict(cpt["weight"], strict=False))
-            net_g.eval().to(config.device)
-            if config.is_half:
-                net_g = net_g.half()
-            else:
-                net_g = net_g.float()
-            vc = VC(tgt_sr, config)
-            print(f"Model loaded: {model_name}")
-            models.append((model_name, model_title, model_author, model_cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, model_index)))
-        categories.append([category_title, category_folder, description, models])
-    with gr.Blocks() as app:
-        gr.Markdown(
-            "# <center> RVC Models\n"
-            "## <center> The input audio should be clean and pure voice without background music.\n"
-            "### <center> This project was inspired by [zomehwh](https://huggingface.co/spaces/zomehwh/rvc-models) and [ardha27](https://huggingface.co/spaces/ardha27/rvc-models)\n"
-            "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n\n"
-            "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
-        )
-        for (folder_title, folder, description, models) in categories:
-            with gr.TabItem(folder_title):
-                if description:
-                    gr.Markdown(f"### <center> {description}")
-                with gr.Tabs():
-                    if not models:
-                        gr.Markdown("# <center> No Model Loaded.")
-                        gr.Markdown("## <center> Please add the model or fix your model path.")
-                        continue
-                    for (name, title, author, cover, vc_fn) in models:
-                        with gr.TabItem(name):
-                            with gr.Row():
-                                gr.Markdown(
-                                    '<div align="center">'
-                                    f'<div>{title}</div>\n'+
-                                    (f'<div>Model author: {author}</div>' if author else "")+
-                                    (f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "")+
-                                    '</div>'
-                                )
-                            with gr.Row():
-                                with gr.Column():
-                                    vc_download_audio = gr.Dropdown(label="Provider", choices=["Youtube"], allow_custom_value=False, value="Youtube", info="Select provider [REQUIRED: UPLOAD MODE = OFF] (Default: Youtube)")
-                                    vc_link = gr.Textbox(label="Youtube URL", info="Example: https://www.youtube.com/watch?v=Nc0sB1Bmf-A")
-                                    vc_split_model = gr.Dropdown(label="Splitter Model", choices=["htdemucs", "mdx_extra_q"], allow_custom_value=False, value="htdemucs", info="Select the splitter model (Default: htdemucs)")
-                                    vc_split = gr.Button("Split Audio", variant="primary")
-                                    vc_vocal_preview = gr.Audio(label="Vocal Preview")
-                                    vc_inst_preview = gr.Audio(label="Instrumental Preview")
-                                    vc_audio_preview = gr.Audio(label="Audio Preview")
-                                with gr.Column():
-                                    upload_mode = gr.Checkbox(label="Upload mode", value=False, info="Enable to upload audio instead of audio path")
-                                    vc_input = gr.Textbox(label="Input audio path")
-                                    vc_upload = gr.Audio(label="Upload audio file", visible=False, interactive=True)
-                                    vc_transpose = gr.Number(label="Transpose", value=0, info='Type "12" to change from male to female voice. Type "-12" to change female to male voice')
-                                    vc_f0method = gr.Radio(
-                                        label="Pitch extraction algorithm",
-                                        choices=["pm", "harvest"],
-                                        value="pm",
-                                        interactive=True,
-                                        info="PM is fast but Harvest is better for low frequencies. (Default: PM)"
-                                    )
-                                    vc_index_ratio = gr.Slider(
-                                        minimum=0,
-                                        maximum=1,
-                                        label="Retrieval feature ratio",
-                                        value=0.6,
-                                        interactive=True,
-                                        info="(Default: 0.6)"
-                                    )
-                                    tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
-                                    tts_text = gr.Textbox(visible=False, label="TTS text")
-                                    tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
-                                    vc_output = gr.Audio(label="Output Audio", interactive=False)
-                                    vc_submit = gr.Button("Convert", variant="primary")
-                                with gr.Column():
-                                    vc_volume = gr.Slider(
-                                        minimum=0,
-                                        maximum=10,
-                                        label="Vocal volume",
-                                        value=4,
-                                        interactive=True,
-                                        step=1,
-                                        info="Adjust vocal volume (Default: 4)"
-                                    )
-                                    vc_combined_output = gr.Audio(label="Output Combined Audio")
-                                    vc_combine = gr.Button("Combine", variant="primary")
-                            vc_submit.click(vc_fn, [vc_input, vc_upload, upload_mode, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output])
-                            vc_split.click(cut_vocal_and_inst, [vc_link, vc_download_audio, vc_split_model], [vc_vocal_preview, vc_inst_preview, vc_audio_preview, vc_input])
-                            vc_combine.click(combine_vocal_and_inst, [vc_output, vc_volume, vc_split_model], vc_combined_output)
-                            tts_mode.change(change_to_tts_mode, [tts_mode, upload_mode], [vc_input, vc_upload, upload_mode, tts_text, tts_voice])
-                            upload_mode.change(change_to_upload_mode, [upload_mode], [vc_input, vc_upload])
-    app.queue(concurrency_count=1, max_size=20, api_open=config.api).launch(share=config.colab)
app.py CHANGED
@@ -138,12 +138,14 @@ if __name__ == '__main__':
         categories.append([category_title, category_folder, description, models])
     with gr.Blocks() as app:
         gr.Markdown(
-            "# <center> RVC Models\n"
+            "# <center> RVC Genshin Impact\n"
             "## <center> The input audio should be clean and pure voice without background music.\n"
-            "## <center> [Recommended to use google colab for more feature](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n"
+            "## <center> [Recommended to use google colab to use all genshin model & feature](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n"
+            "### <center> I limit the number of models to 15 due to an error caused by exceeding the available memory. (16 GB limit)\n"
             "### <center> This project was inspired by [zomehwh](https://huggingface.co/spaces/zomehwh/rvc-models) and [ardha27](https://huggingface.co/spaces/ardha27/rvc-models)\n"
             "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n\n"
-            "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
+            "[![Original RVC Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
+            "[![RVC Inference Repo](https://badgen.net/badge/icon/github?icon=github&label)](https://github.com/ArkanDash/rvc-inference)"
         )
         for (folder_title, folder, description, models) in categories:
             with gr.TabItem(folder_title):
requirements-full.txt DELETED
@@ -1,49 +0,0 @@
-numba==0.56.4
-numpy==1.23.5
-scipy==1.9.3
-librosa==0.9.2
-llvmlite==0.39.0
-fairseq==0.12.2
-faiss-cpu==1.7.0; sys_platform == "darwin"
-faiss-cpu==1.7.2; sys_platform != "darwin"
-gradio
-Cython
-future>=0.18.3
-pydub>=0.25.1
-soundfile>=0.12.1
-ffmpeg-python>=0.2.0
-tensorboardX
-functorch>=2.0.0
-Jinja2>=3.1.2
-json5>=0.9.11
-Markdown
-matplotlib>=3.7.1
-matplotlib-inline>=0.1.6
-praat-parselmouth>=0.4.3
-Pillow>=9.1.1
-pyworld>=0.3.2
-resampy>=0.4.2
-scikit-learn>=1.2.2
-starlette>=0.26.1
-tensorboard
-tensorboard-data-server
-tensorboard-plugin-wit
-torchgen>=0.0.1
-tqdm>=4.65.0
-tornado>=6.2
-Werkzeug>=2.2.3
-uc-micro-py>=1.0.1
-sympy>=1.11.1
-tabulate>=0.9.0
-PyYAML>=6.0
-pyasn1>=0.4.8
-pyasn1-modules>=0.2.8
-fsspec>=2023.3.0
-absl-py>=1.4.0
-audioread
-uvicorn>=0.21.1
-colorama>=0.4.6
-edge-tts
-demucs
-yt_dlp
-ffmpeg
weights/genshin-impact/fischl-jp/added_IVF1225_Flat_nprobe_1.index DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0cb77f4db13ba45f4d48f711ad3c9e13159b9e8ff7b5e8ff0ac705ad0ed3bad4
-size 50599099
weights/genshin-impact/fischl-jp/cover.png DELETED
Git LFS Details
  • SHA256: d23421cb6899171aa821a3a060aa0f1feef4d293e06a2fa3f2398849ed8efa0b
  • Pointer size: 132 Bytes
  • Size of remote file: 1.5 MB
weights/genshin-impact/fischl-jp/fischl-jp.pth DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:28c04e4cd0ddb1abc8a3d0ad0b7aea94fba37b9ef5903e2f4e8c2e62015844f5
-size 55027589
weights/genshin-impact/kaeya-jp/added_IVF1655_Flat_nprobe_1.index DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1afba8e77f937bf76bd10f54fcbb4ca119dc3b2c599dc4cbf0843d618c26ad15
-size 68325763
weights/genshin-impact/kaeya-jp/cover.png DELETED
Git LFS Details
  • SHA256: e4b12f3c2ce55d28d8cadcd17464d0922e55aac37495654a7bcfbb03c8beffd5
  • Pointer size: 131 Bytes
  • Size of remote file: 623 kB
weights/genshin-impact/kaeya-jp/kaeya-jp.pth DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6492cc522ecaef80436bae3ebdb6e6bd9e6242dc6be74c4e6283c7825285d480
-size 55027130
weights/genshin-impact/model_info.json CHANGED
@@ -95,14 +95,6 @@
         "feature_retrieval_library": "added_IVF1036_Flat_nprobe_1.index",
         "author":"ArkanDash"
     },
-    "fischl-jp": {
-        "enable": true,
-        "name": "fischl-jp",
-        "title": "Genshin Impact - Fischl",
-        "cover": "cover.png",
-        "feature_retrieval_library": "added_IVF1225_Flat_nprobe_1.index",
-        "author":"ArkanDash"
-    },
     "ayato-jp": {
         "enable": true,
         "name": "ayato-jp",
@@ -126,13 +118,5 @@
         "cover": "cover.png",
         "feature_retrieval_library": "added_IVF1672_Flat_nprobe_1.index",
         "author":"ArkanDash"
-    },
-    "kaeya-jp": {
-        "enable": true,
-        "name": "kaeya-jp",
-        "title": "Genshin Impact - Kaeya",
-        "cover": "cover.png",
-        "feature_retrieval_library": "added_IVF1655_Flat_nprobe_1.index",
-        "author":"ArkanDash"
-    }
+    }
 }