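"""Gradio app: one-click AI dubbing of SRT subtitle files.

Pipeline sketch: extract audio from uploaded videos with ffmpeg, slice the
original dub along SRT timestamps, synthesize each line with the ElevenLabs
TTS API, then run FreeVC voice conversion against the sliced reference audio
and merge the results back into a single track.
"""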
import os
import torch
import librosa
import gradio as gr
from scipy.io.wavfile import write
from transformers import WavLMModel
import utils
from models import SynthesizerTrn
from mel_processing import mel_spectrogram_torch
from speaker_encoder.voice_encoder import SpeakerEncoder
'''
def get_wavlm():
    # One-off helper to fetch the WavLM checkpoint (requires gdown and shutil).
    os.system('gdown https://drive.google.com/uc?id=12-cB34qCTvByWT-QtOcZaqwwO21FLSqU')
    shutil.move('WavLM-Large.pt', 'wavlm')
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print("Loading FreeVC...")
hps = utils.get_hparams_from_file("configs/freevc.json")
freevc = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc.eval()
_ = utils.load_checkpoint("checkpoints/freevc.pth", freevc, None)
smodel = SpeakerEncoder('speaker_encoder/ckpt/pretrained_bak_5805000.pt')

print("Loading FreeVC(24k)...")
hps = utils.get_hparams_from_file("configs/freevc-24.json")
freevc_24 = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc_24.eval()
_ = utils.load_checkpoint("checkpoints/freevc-24.pth", freevc_24, None)

print("Loading FreeVC-s...")
hps = utils.get_hparams_from_file("configs/freevc-s.json")
freevc_s = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc_s.eval()
_ = utils.load_checkpoint("checkpoints/freevc-s.pth", freevc_s, None)

print("Loading WavLM for content...")
cmodel = WavLMModel.from_pretrained("microsoft/wavlm-large").to(device)
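# Note: `hps` is a module-level global that now holds the last config loaded
# (configs/freevc-s.json); convert() below reads its sampling parameters.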
import ffmpeg
import numpy as np
from elevenlabs.client import ElevenLabs
def pad_buffer(audio):
    # Pad the raw byte buffer to a multiple of 2 bytes so it can be viewed
    # as int16 samples. (Currently unused helper.)
    buffer_size = len(audio)
    element_size = np.dtype(np.int16).itemsize
    if buffer_size % element_size != 0:
        audio = audio + b'\0' * (element_size - (buffer_size % element_size))
    return audio
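# e.g. pad_buffer(b'\x01\x02\x03') -> b'\x01\x02\x03\x00' (4 bytes, int16-aligned)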
def generate_voice(api_key, text, voice):
    # Synthesize `text` with the ElevenLabs API and save the stream to output.mp3.
    client = ElevenLabs(
        api_key=api_key,  # defaults to the ELEVEN_API_KEY environment variable
    )
    audio = client.generate(text=text, voice=voice)  # iterator of audio chunks
    audio = b"".join(audio)
    with open("output.mp3", "wb") as f:
        f.write(audio)
    return "output.mp3"
def convert(api_key, text, tgt, voice, save_path):
    # Synthesize `text` with ElevenLabs, then convert it to the timbre of the
    # reference clip `tgt` using FreeVC. The model choice is hardcoded to the
    # 24 kHz variant, so the FreeVC-s (mel) branch below is effectively dead
    # code, kept for completeness.
    model = "FreeVC (24kHz)"
    with torch.no_grad():
        # Target speaker: load the reference clip and embed it
        wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate)
        wav_tgt, _ = librosa.effects.trim(wav_tgt, top_db=20)
        if model == "FreeVC" or model == "FreeVC (24kHz)":
            g_tgt = smodel.embed_utterance(wav_tgt)
            g_tgt = torch.from_numpy(g_tgt).unsqueeze(0).to(device)
        else:
            wav_tgt = torch.from_numpy(wav_tgt).unsqueeze(0).to(device)
            mel_tgt = mel_spectrogram_torch(
                wav_tgt,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax
            )
        # Source content: TTS the text, then extract WavLM content features
        src = generate_voice(api_key, text, voice)
        wav_src, _ = librosa.load(src, sr=hps.data.sampling_rate)
        wav_src = torch.from_numpy(wav_src).unsqueeze(0).to(device)
        c = cmodel(wav_src).last_hidden_state.transpose(1, 2).to(device)
        # Inference
        if model == "FreeVC":
            audio = freevc.infer(c, g=g_tgt)
        elif model == "FreeVC-s":
            audio = freevc_s.infer(c, mel=mel_tgt)
        else:
            audio = freevc_24.infer(c, g=g_tgt)
        audio = audio[0][0].data.cpu().float().numpy()
        if model == "FreeVC" or model == "FreeVC-s":
            write(f"output/{save_path}.wav", hps.data.sampling_rate, audio)
        else:
            write(f"output/{save_path}.wav", 24000, audio)
    return f"output/{save_path}.wav"
class subtitle:
    def __init__(self, index: int, start_time, end_time, text: str):
        self.index = int(index)
        self.start_time = start_time
        self.end_time = end_time
        self.text = text.strip()

    def normalize(self, ntype: str, fps=30):
        # Convert raw timestamp strings into seconds (float)
        if ntype == "prcsv":
            # Premiere CSV style: "h:m:s;frames", frames converted via fps
            h, m, s, fs = (self.start_time.replace(';', ':')).split(":")
            self.start_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 5)
            h, m, s, fs = (self.end_time.replace(';', ':')).split(":")
            self.end_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 5)
        elif ntype == "srt":
            # SRT style: "hh:mm:ss,mmm"
            h, m, s = self.start_time.split(":")
            s = s.replace(",", ".")
            self.start_time = int(h) * 3600 + int(m) * 60 + round(float(s), 5)
            h, m, s = self.end_time.split(":")
            s = s.replace(",", ".")
            self.end_time = int(h) * 3600 + int(m) * 60 + round(float(s), 5)
        else:
            raise ValueError(f"unknown normalize type: {ntype}")

    def add_offset(self, offset=0):
        # Shift both timestamps, clamping at zero
        self.start_time += offset
        if self.start_time < 0:
            self.start_time = 0
        self.end_time += offset
        if self.end_time < 0:
            self.end_time = 0

    def __str__(self) -> str:
        return f'id:{self.index},start:{self.start_time},end:{self.end_time},text:{self.text}'
def read_srt(uploaded_file):
    offset = 0
    with open(uploaded_file.name, "r", encoding="utf-8") as f:
        file = f.readlines()
    subtitle_list = []
    indexlist = []
    filelength = len(file)
    # Locate every timestamp line ("... --> ...") whose preceding line is a
    # purely numeric subtitle index
    for i in range(0, filelength):
        if " --> " in file[i]:
            is_st = True
            for char in file[i - 1].strip().replace("\ufeff", ""):
                if char not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                    is_st = False
                    break
            if is_st:
                indexlist.append(i)  # line number of the timestamp row
    listlength = len(indexlist)
    # All blocks except the last: text runs from the line after the timestamp
    # up to the blank line before the next index
    for i in range(0, listlength - 1):
        st, et = file[indexlist[i]].split(" --> ")
        id = int(file[indexlist[i] - 1].strip().replace("\ufeff", ""))
        text = ""
        for x in range(indexlist[i] + 1, indexlist[i + 1] - 2):
            text += file[x]
        st = subtitle(id, st, et, text)
        st.normalize(ntype="srt")
        st.add_offset(offset=offset)
        subtitle_list.append(st)
    # Last block: text runs to the end of the file
    st, et = file[indexlist[-1]].split(" --> ")
    id = int(file[indexlist[-1] - 1].strip().replace("\ufeff", ""))
    text = ""
    for x in range(indexlist[-1] + 1, filelength):
        text += file[x]
    st = subtitle(id, st, et, text)
    st.normalize(ntype="srt")
    st.add_offset(offset=offset)
    subtitle_list.append(st)
    return subtitle_list
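# Example of the SRT block layout read_srt() expects:
#   1
#   00:00:01,000 --> 00:00:03,500
#   Subtitle text (bilingual files carry two text lines here)
#   <blank line>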
import webrtcvad
from pydub import AudioSegment
from pydub.utils import make_chunks

def vad(audio_name, out_path_name):
    # Keep only voiced frames of a wav file, writing the result to
    # f"{out_path_name}.wav".
    audio = AudioSegment.from_file(audio_name, format="wav")
    # WebRTC VAD supports only 8000, 16000, 32000, or 48000 Hz
    audio = audio.set_frame_rate(48000)
    # webrtcvad requires mono, 16-bit PCM input
    audio = audio.set_channels(1)
    audio = audio.set_sample_width(2)
    # Aggressiveness mode: an integer between 0 and 3; 3 filters the most
    vad = webrtcvad.Vad()
    vad.set_mode(3)
    frame_duration = 30  # frame length in ms (webrtcvad accepts 10, 20, or 30)
    frame_width = int(audio.frame_rate * frame_duration / 1000)  # frame length in samples
    frames = make_chunks(audio, frame_duration)
    # Classify each frame and keep the voiced ones
    voiced_frames = []
    for frame in frames:
        if len(frame.raw_data) < frame_width * 2:  # drop the trailing short frame
            break
        if vad.is_speech(frame.raw_data, audio.frame_rate):
            voiced_frames.append(frame)
    # Concatenate voiced frames back into one segment
    voiced_audio = sum(voiced_frames, AudioSegment.silent(duration=0))
    voiced_audio.export(f"{out_path_name}.wav", format="wav")
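# e.g. vad("increased_0.wav", "sliced_audio_3_0") -> "sliced_audio_3_0.wav"
# containing only the frames webrtcvad classified as speech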
def trim_audio(intervals, input_file_path, output_file_path):
    # Cut the intervals (in seconds) out of the input file; each slice ends up
    # at f"{output_file_path}_{i}.wav".
    audio = AudioSegment.from_file(input_file_path)
    for i, (start_time, end_time) in enumerate(intervals):
        # pydub slices in milliseconds
        segment = audio[start_time * 1000:end_time * 1000]
        output_file_path_i = f"increased_{i}.wav"
        if len(segment) < 5000:
            # Loop short segments so each reference clip is comfortably past
            # 5 seconds (the +3 adds extra margin), then VAD-filter the result
            repeat_count = (5000 // len(segment)) + 3
            longer_audio = segment * repeat_count
            print(f"Audio was less than 5 seconds. Extended to {len(longer_audio)} milliseconds.")
            longer_audio.export(output_file_path_i, format='wav')
            vad(f"{output_file_path_i}", f"{output_file_path}_{i}")
        else:
            print("Audio is already 5 seconds or longer.")
            segment.export(f"{output_file_path}_{i}.wav", format='wav')
import re

def sort_key(file_name):
    """Extract the last number in the file name for sorting."""
    numbers = re.findall(r'\d+', file_name)
    if numbers:
        return int(numbers[-1])
    return -1  # no number: sort to the front
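# e.g. sort_key("你好 12.wav") -> 12, so merged clips follow subtitle order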
def merge_audios(folder_path):
    output_file = "AI配音版.wav"  # "AI dubbed version"
    # Gather all WAV files and sort them by the last number in their names
    files = [f for f in os.listdir(folder_path) if f.endswith('.wav')]
    sorted_files = sorted(files, key=sort_key)
    # Concatenate in order
    merged_audio = AudioSegment.empty()
    for file in sorted_files:
        audio = AudioSegment.from_wav(os.path.join(folder_path, file))
        merged_audio += audio
        print(f"Merged: {file}")
    merged_audio.export(output_file, format="wav")
    return "AI配音版.wav"
import shutil
import zipfile

def zip_sliced_files(directory, zip_filename, chosen_name):
    # Bundle every .wav file whose name starts with `chosen_name` into a zip
    with zipfile.ZipFile(zip_filename, 'w') as zipf:
        for foldername, subfolders, filenames in os.walk(directory):
            for filename in filenames:
                if filename.startswith(f"{chosen_name}") and filename.endswith(".wav"):
                    file_path = os.path.join(foldername, filename)
                    zipf.write(file_path, arcname=filename)
                    print(f"Added {filename} to {zip_filename}")
# set speed
from pydub.effects import speedup

def change_speed(input_file, speed=1.0):
    audio = AudioSegment.from_file(input_file)
    # speedup() removes small slices and crossfades, raising the playback
    # speed without changing pitch
    faster_audio = speedup(audio, playback_speed=speed)
    faster_audio.export("speed_changed_speech.wav", format="wav")
    return "speed_changed_speech.wav"
# delete stale files from previous runs
def delete_sliced_files(directory, chosen_name):
    for foldername, subfolders, filenames in os.walk(directory):
        for filename in filenames:
            # Delete every file whose name starts with `chosen_name`
            if filename.startswith(f"{chosen_name}"):
                file_path = os.path.join(foldername, filename)
                os.remove(file_path)
                print(f"Deleted {filename}")
def convert_from_srt(api_key, filename, audio_full, voice, multilingual):
    subtitle_list = read_srt(filename)
    delete_sliced_files("./", "sliced")
    #audio_data, sr = librosa.load(audio_full, sr=44100)
    #write("audio_full.wav", sr, audio_data.astype(np.int16))
    if os.path.isdir("output"):
        shutil.rmtree("output")
    if not multilingual:
        for i in subtitle_list:
            try:
                os.makedirs("output", exist_ok=True)
                trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
                print(f"正在合成第{i.index}条语音")  # "Synthesizing line {i.index}"
                print(f"语音内容:{i.text}")  # "Text: {i.text}"
                convert(api_key, i.text, f"sliced_audio_{i.index}_0.wav", voice, i.text + " " + str(i.index))
            except Exception as e:
                print(f"Skipped line {i.index}: {e}")
    else:
        # Bilingual subtitles: the second line of each block is the English text
        for i in subtitle_list:
            try:
                os.makedirs("output", exist_ok=True)
                trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
                print(f"正在合成第{i.index}条语音")  # "Synthesizing line {i.index}"
                print(f"语音内容:{i.text.splitlines()[1]}")  # "Text: ..."
                convert(api_key, i.text.splitlines()[1], f"sliced_audio_{i.index}_0.wav", voice, i.text.splitlines()[1] + " " + str(i.index))
            except Exception as e:
                print(f"Skipped line {i.index}: {e}")
    merge_audios("output")
    zip_sliced_files("./", "参考音频.zip", "sliced")  # "reference audio" zip
    return "AI配音版.wav", "参考音频.zip"
def save_file_with_new_name(original_file_path, new_file_path):
    shutil.copyfile(original_file_path, new_file_path)

def denoise(input_files):
    # Despite the name, this extracts the audio track of each uploaded video
    # as a stereo 44.1 kHz wav file and zips the results.
    delete_sliced_files("./", "input_video")
    #if os.path.exists("audio_full.wav"):
    #    os.remove("audio_full.wav")
    for video_file in input_files:
        name1 = video_file.name
        file_name_with_extension = name1.split('/')[-1]
        file_name1 = file_name_with_extension.split('.mp4')[0] + ".mp4"
        save_file_with_new_name(video_file.name, file_name1)
        ffmpeg.input(file_name1).output("input_video" + file_name1 + ".wav", ac=2, ar=44100).run()
    zip_sliced_files("./", "转换后的音频.zip", "input_video")  # "converted audio" zip
    return "转换后的音频.zip"
# Gradio UI (labels are in Chinese; the app targets Chinese-speaking users)
with gr.Blocks() as app:
    gr.Markdown("# <center>🌊💕🎶 11Labs TTS - SRT文件一键AI配音</center>")
    gr.Markdown("### <center>🌟 只需上传SRT文件和原版配音文件即可,每次一集视频AI自动配音!Developed by Kevin Wang </center>")
    with gr.Tab("📺视频转音频"):  # "Video to audio"
        with gr.Row():
            inp_video = gr.Files(label="您可以上传多集包含原声配音的视频", file_types=['.mp4'])
            btn_convert = gr.Button("视频文件转音频", variant="primary")
            out_audio = gr.File(label="包含所有配音音频的zip文件")
        btn_convert.click(denoise, [inp_video], [out_audio])
    with gr.Tab("🎶AI配音"):  # "AI dubbing"
        with gr.Row():
            with gr.Column():
                inp0 = gr.Textbox(type='password', label='请输入您的11Labs API Key')
                inp1 = gr.File(file_count="single", label="请上传一集视频对应的SRT文件")
                inp2 = gr.Audio(label="请上传一集视频的配音文件", type="filepath")
                inp3 = gr.Dropdown(choices=["Rachel", "Alice", "Chris", "Adam"], label='请选择一个说话人提供基础音色', info="试听音色链接:https://elevenlabs.io/app/speech-synthesis", value='Chris')
                #inp4 = gr.Dropdown(label="请选择用于分离伴奏的模型", info="UVR-HP5去除背景音乐效果更好,但会对人声造成一定的损伤", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5")
                inp4 = gr.Checkbox(label="SRT文件是否为双语字幕", info="若为双语字幕,请打勾选择(SRT文件中需要先出现中文字幕,后英文字幕;中英字幕各占一行)")
                btn1 = gr.Button("一键开启AI配音吧💕", variant="primary")
            with gr.Column():
                out1 = gr.Audio(label="为您生成的AI完整配音", type="filepath")
                out2 = gr.File(label="包含所有参考音频的zip文件")
                inp_speed = gr.Slider(label="设置AI配音的速度", minimum=1.02, maximum=1.5, value=1.02, step=0.01)
                btn2 = gr.Button("一键改变AI配音速度")
                out3 = gr.Audio(label="变速后的AI配音", type="filepath")
        btn1.click(convert_from_srt, [inp0, inp1, inp2, inp3, inp4], [out1, out2])
        btn2.click(change_speed, [out1, inp_speed], [out3])
    gr.Markdown("### <center>注意❗:请勿生成会对任何个人或组织造成侵害的内容,请尊重他人的著作权和知识产权。用户对此程序的任何使用行为与程序开发者无关。</center>")
    gr.HTML('''
        <div class="footer">
            <p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
            </p>
        </div>
    ''')

app.launch(share=True, show_error=True)