kevinwang676 committed on
Commit 4adf448
1 Parent(s): b4036a5

Update app.py

Files changed (1): app.py (+220 -17)

app.py CHANGED
@@ -48,8 +48,40 @@ _ = utils.load_checkpoint("checkpoints/freevc-s.pth", freevc_s, None)
 
 print("Loading WavLM for content...")
 cmodel = WavLMModel.from_pretrained("microsoft/wavlm-large").to(device)
-
-def convert(model, src, tgt):
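+# New dependencies for the dubbing workflow: OpenAI TTS synthesizes each subtitle
+# line, ffmpeg extracts the video's soundtrack, and two UVR5 checkpoints
+# (HP2 and HP5) downloaded at startup handle vocal/accompaniment separation.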
+
+
+from openai import OpenAI
+
+import ffmpeg
+import urllib.request
+urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP2.pth", "uvr5/uvr_model/UVR-HP2.pth")
+urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP5.pth", "uvr5/uvr_model/UVR-HP5.pth")
+
+from uvr5.vr import AudioPre
+weight_uvr5_root = "uvr5/uvr_model"
+uvr5_names = []
+for name in os.listdir(weight_uvr5_root):
+    if name.endswith(".pth") or "onnx" in name:
+        uvr5_names.append(name.replace(".pth", ""))
+
+func = AudioPre
+
+pre_fun_hp2 = func(
+    agg=int(10),
+    model_path=os.path.join(weight_uvr5_root, "UVR-HP2.pth"),
+    device="cuda",
+    is_half=True,
+)
+pre_fun_hp5 = func(
+    agg=int(10),
+    model_path=os.path.join(weight_uvr5_root, "UVR-HP5.pth"),
+    device="cuda",
+    is_half=True,
+)
+
+
+def convert(api_key, text, tgt, voice, save_path):
+    model = "FreeVC (24kHz)"
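+    # NOTE: model is pinned to "FreeVC (24kHz)", so the sampling-rate branch
+    # below always takes the 24000 Hz path.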
     with torch.no_grad():
         # tgt
         wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate)
@@ -60,7 +92,7 @@ def convert(model, src, tgt):
         else:
             wav_tgt = torch.from_numpy(wav_tgt).unsqueeze(0).to(device)
         mel_tgt = mel_spectrogram_torch(
-            wav_tgt,
+            wav_tgt,
             hps.data.filter_length,
             hps.data.n_mel_channels,
             hps.data.sampling_rate,
@@ -70,6 +102,17 @@ def convert(model, src, tgt):
             hps.data.mel_fmax
         )
         # src
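+        # The "source" audio is no longer uploaded: it is synthesized from the
+        # subtitle text with OpenAI TTS, then voice-converted toward the
+        # reference speaker.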
+        client = OpenAI(api_key=api_key)
+
+        response = client.audio.speech.create(
+            model="tts-1-hd",
+            voice=voice,
+            input=text,
+        )
+
+        response.stream_to_file("output_openai.mp3")
+
+        src = "output_openai.mp3"
         wav_src, _ = librosa.load(src, sr=hps.data.sampling_rate)
         wav_src = torch.from_numpy(wav_src).unsqueeze(0).to(device)
         c = cmodel(wav_src).last_hidden_state.transpose(1, 2).to(device)
@@ -82,22 +125,182 @@ def convert(model, src, tgt):
             audio = freevc_24.infer(c, g=g_tgt)
         audio = audio[0][0].data.cpu().float().numpy()
         if model == "FreeVC" or model == "FreeVC-s":
-            write("out.wav", hps.data.sampling_rate, audio)
+            write(f"output/{save_path}.wav", hps.data.sampling_rate, audio)
         else:
-            write("out.wav", 24000, audio)
-        out = "out.wav"
-        return out
+            write(f"output/{save_path}.wav", 24000, audio)
+        return f"output/{save_path}.wav"
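+
+
+# Helpers for SRT-driven dubbing: parse cues, slice the separated vocal track
+# per cue, and stitch the synthesized clips back together.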
+class subtitle:
+    def __init__(self, index: int, start_time, end_time, text: str):
+        self.index = int(index)
+        self.start_time = start_time
+        self.end_time = end_time
+        self.text = text.strip()
+
+    def normalize(self, ntype: str, fps=30):
+        if ntype == "prcsv":
+            # hours:minutes:seconds;frames -> seconds
+            h, m, s, fs = (self.start_time.replace(';', ':')).split(":")
+            self.start_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
+            h, m, s, fs = (self.end_time.replace(';', ':')).split(":")
+            self.end_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
+        elif ntype == "srt":
+            h, m, s = self.start_time.split(":")
+            s = s.replace(",", ".")
+            self.start_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
+            h, m, s = self.end_time.split(":")
+            s = s.replace(",", ".")
+            self.end_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
+        else:
+            raise ValueError
+
+    def add_offset(self, offset=0):
+        self.start_time += offset
+        if self.start_time < 0:
+            self.start_time = 0
+        self.end_time += offset
+        if self.end_time < 0:
+            self.end_time = 0
+
+    def __str__(self) -> str:
+        return f'id:{self.index},start:{self.start_time},end:{self.end_time},text:{self.text}'
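+
+# read_srt: scan for " --> " timestamp rows whose preceding line is a bare
+# numeric index, then gather each cue's text block.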
+def read_srt(uploaded_file):
+    offset = 0
+    with open(uploaded_file.name, "r", encoding="utf-8") as f:
+        file = f.readlines()
+    subtitle_list = []
+    indexlist = []
+    filelength = len(file)
+    for i in range(0, filelength):
+        if " --> " in file[i]:
+            is_st = True
+            for char in file[i - 1].strip().replace("\ufeff", ""):
+                if char not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
+                    is_st = False
+                    break
+            if is_st:
+                indexlist.append(i)  # get line id
+    listlength = len(indexlist)
+    for i in range(0, listlength - 1):
+        st, et = file[indexlist[i]].split(" --> ")
+        id = int(file[indexlist[i] - 1].strip().replace("\ufeff", ""))
+        text = ""
+        for x in range(indexlist[i] + 1, indexlist[i + 1] - 2):
+            text += file[x]
+        st = subtitle(id, st, et, text)
+        st.normalize(ntype="srt")
+        st.add_offset(offset=offset)
+        subtitle_list.append(st)
+    st, et = file[indexlist[-1]].split(" --> ")
+    id = file[indexlist[-1] - 1]
+    text = ""
+    for x in range(indexlist[-1] + 1, filelength):
+        text += file[x]
+    st = subtitle(id, st, et, text)
+    st.normalize(ntype="srt")
+    st.add_offset(offset=offset)
+    subtitle_list.append(st)
+    return subtitle_list
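+
+# trim_audio: pydub slices in milliseconds, hence the *1000 on the cue times.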
+from pydub import AudioSegment
+
+def trim_audio(intervals, input_file_path, output_file_path):
+    # load the audio file
+    audio = AudioSegment.from_file(input_file_path)
+
+    # iterate over the list of time intervals
+    for i, (start_time, end_time) in enumerate(intervals):
+        # extract the segment of the audio
+        segment = audio[start_time * 1000:end_time * 1000]
+
+        # construct the output file path
+        output_file_path_i = f"{output_file_path}_{i}.wav"
+
+        # export the segment to a file
+        segment.export(output_file_path_i, format='wav')
+
+import re
+
+def merge_audios(input_dir):
+    output_file = "AI配音版.wav"  # "AI-dubbed version"
+    # List all .wav files in the directory
+    files = [f for f in os.listdir(input_dir) if f.endswith('.wav')]
+
+    # Sort files based on the numerical order extracted from their names
+    sorted_files = sorted(files, key=lambda x: int(re.search(r'(\d+)', x).group()))
+
+    # Initialize an empty audio segment
+    combined = AudioSegment.empty()
+
+    # Loop through the sorted list and concatenate them
+    for file in sorted_files:
+        path = os.path.join(input_dir, file)
+        audio = AudioSegment.from_wav(path)
+        combined += audio
+        print(f"Merged: {file}")
+
+    # Export the combined audio
+    combined.export(output_file, format="wav")
+    return "AI配音版.wav"
+
+import shutil
+
+def convert_from_srt(apikey, filename, video_full, voice, split_model, multilingual):
+    subtitle_list = read_srt(filename)
 
-model = gr.Dropdown(choices=["FreeVC", "FreeVC-s", "FreeVC (24kHz)"], value="FreeVC",type="value", label="Model")
-audio1 = gr.Audio(label="Source Audio", type='filepath')
-audio2 = gr.Audio(label="Reference Audio", type='filepath')
-inputs = [model, audio1, audio2]
-outputs = gr.Audio(label="Output Audio", type='filepath')
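+    # Pull the soundtrack out of the uploaded video, then separate vocals from
+    # accompaniment with the selected UVR5 model.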
+    if os.path.exists("audio_full.wav"):
+        os.remove("audio_full.wav")
+
+    ffmpeg.input(video_full).output("audio_full.wav", ac=2, ar=44100).run()
+
+    if split_model == "UVR-HP2":
+        pre_fun = pre_fun_hp2
+    else:
+        pre_fun = pre_fun_hp5
+
+    filename = "output"
+    pre_fun._path_audio_("audio_full.wav", f"./denoised/{split_model}/{filename}/", f"./denoised/{split_model}/{filename}/", "wav")
+    if os.path.isdir("output"):
+        shutil.rmtree("output")
+    if multilingual == False:
+        for i in subtitle_list:
+            os.makedirs("output", exist_ok=True)
+            trim_audio([[i.start_time, i.end_time]], f"./denoised/{split_model}/{filename}/vocal_audio_full.wav_10.wav", f"sliced_audio_{i.index}")
+            print(f"Synthesizing voice clip {i.index}")
+            print(f"Text: {i.text}")
+            convert(apikey, i.text, f"sliced_audio_{i.index}_0.wav", voice, i.text + " " + str(i.index))
+    else:
+        for i in subtitle_list:
+            os.makedirs("output", exist_ok=True)
+            trim_audio([[i.start_time, i.end_time]], f"./denoised/{split_model}/{filename}/vocal_audio_full.wav_10.wav", f"sliced_audio_{i.index}")
+            print(f"Synthesizing voice clip {i.index}")
+            print(f"Text: {i.text.splitlines()[1]}")
+            convert(apikey, i.text.splitlines()[1], f"sliced_audio_{i.index}_0.wav", voice, i.text.splitlines()[1] + " " + str(i.index))
+
+    return merge_audios("output")
+
 
-title = "FreeVC"
-description = "Gradio Demo for FreeVC: Towards High-Quality Text-Free One-Shot Voice Conversion. To use it, simply upload your audio, or click the example to load. Read more at the links below. Note: It seems that the WavLM checkpoint in HuggingFace is a little different from the one used to train FreeVC, which may degrade the performance a bit. In addition, speaker similarity can be largely affected if there are too much silence in the reference audio, so please <strong>trim</strong> it before submitting."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2210.15418' target='_blank'>Paper</a> | <a href='https://github.com/OlaWod/FreeVC' target='_blank'>Github Repo</a></p>"
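+# The one-shot gr.Interface demo is replaced by a Blocks layout for the
+# SRT-dubbing workflow.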
+with gr.Blocks() as app:
+    gr.Markdown("# <center>🌊💕🎶 XTTS - One-Click AI Dubbing from SRT Files</center>")
+    gr.Markdown("### <center>🌟 Just upload an SRT file and the original voice track; one full episode is dubbed automatically per run! Developed by Kevin Wang </center>")
+    with gr.Row():
+        with gr.Column():
+            inp0 = gr.Textbox(type='password', label="Enter your OpenAI API Key")
+            inp1 = gr.File(file_count="single", label="Upload the SRT file for one episode")
+            inp2 = gr.Video(label="Upload the episode's video with its original dub", info="Must be an .mp4 video file")
+            inp3 = gr.Dropdown(choices=['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer'], label="Choose a speaker for the base voice", info="Voice samples: https://platform.openai.com/docs/guides/text-to-speech/voice-options", value='alloy')
+            inp4 = gr.Dropdown(label="Choose the accompaniment-separation model", info="UVR-HP5 removes background music more cleanly, but slightly degrades the vocals", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5")
+            inp5 = gr.Checkbox(label="Is the SRT file bilingual?", info="Check this box for bilingual subtitles (each cue must have the Chinese line first, then the English line, one line each)")
+            btn = gr.Button("Start AI dubbing 💕", variant="primary")
+        with gr.Column():
+            out1 = gr.Audio(label="Your complete AI-generated dub", type="filepath")
 
-examples=[["FreeVC", 'p225_001.wav', 'p226_002.wav'], ["FreeVC-s", 'p226_002.wav', 'p225_001.wav'], ["FreeVC (24kHz)", 'p225_001.wav', 'p226_002.wav']]
+    btn.click(convert_from_srt, [inp0, inp1, inp2, inp3, inp4, inp5], [out1])
+
+    gr.Markdown("### <center>Note❗: Do not generate content that could harm any individual or organization, and respect others' copyrights and intellectual property. The developer bears no responsibility for any use of this program.</center>")
+    gr.HTML('''
+    <div class="footer">
+        <p>🌊🏞️🎶 - The river rushes eastward, its endless surge resounding. — Gu Lin (Ming dynasty)
+        </p>
+    </div>
+    ''')
 
-gr.Interface(convert, inputs, outputs, title=title, description=description, article=article, examples=examples, enable_queue=True).launch()
+app.launch(share=True, show_error=True)
 