|
import os |
|
import torch |
|
import librosa |
|
import gradio as gr |
|
from scipy.io.wavfile import write |
|
from transformers import WavLMModel |
|
|
|
import utils |
|
from models import SynthesizerTrn |
|
from mel_processing import mel_spectrogram_torch |
|
from speaker_encoder.voice_encoder import SpeakerEncoder |
|
|
|
# (Disabled) one-time download of the WavLM-Large checkpoint; requires `gdown` and `shutil`.
'''
|
def get_wavlm(): |
|
os.system('gdown https://drive.google.com/uc?id=12-cB34qCTvByWT-QtOcZaqwwO21FLSqU') |
|
shutil.move('WavLM-Large.pt', 'wavlm') |
|
''' |
|
|
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") |
|
|
|
print("Loading FreeVC...") |
|
hps = utils.get_hparams_from_file("configs/freevc.json") |
|
freevc = SynthesizerTrn( |
|
hps.data.filter_length // 2 + 1, |
|
hps.train.segment_size // hps.data.hop_length, |
|
**hps.model).to(device) |
|
_ = freevc.eval() |
|
_ = utils.load_checkpoint("checkpoints/freevc.pth", freevc, None) |
|
smodel = SpeakerEncoder('speaker_encoder/ckpt/pretrained_bak_5805000.pt')  # speaker-embedding (g) encoder
|
|
|
print("Loading FreeVC(24k)...") |
|
hps = utils.get_hparams_from_file("configs/freevc-24.json") |
|
freevc_24 = SynthesizerTrn( |
|
hps.data.filter_length // 2 + 1, |
|
hps.train.segment_size // hps.data.hop_length, |
|
**hps.model).to(device) |
|
_ = freevc_24.eval() |
|
_ = utils.load_checkpoint("checkpoints/freevc-24.pth", freevc_24, None) |
|
|
|
print("Loading FreeVC-s...") |
|
hps = utils.get_hparams_from_file("configs/freevc-s.json") |
|
freevc_s = SynthesizerTrn( |
|
hps.data.filter_length // 2 + 1, |
|
hps.train.segment_size // hps.data.hop_length, |
|
**hps.model).to(device) |
|
_ = freevc_s.eval() |
|
_ = utils.load_checkpoint("checkpoints/freevc-s.pth", freevc_s, None) |
|
|
|
print("Loading WavLM for content...") |
|
cmodel = WavLMModel.from_pretrained("microsoft/wavlm-large").to(device)  # content encoder feeding FreeVC
|
|
|
|
|
|
|
import ffmpeg |
|
|
|
|
import numpy as np |
|
|
|
from elevenlabs.client import ElevenLabs |
|
|
|
|
|
def pad_buffer(audio): |
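    # Raw PCM buffers must hold whole int16 samples; pad with NUL bytes
    # so the byte length is a multiple of the 2-byte element size.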
|
|
|
buffer_size = len(audio) |
|
element_size = np.dtype(np.int16).itemsize |
|
if buffer_size % element_size != 0: |
|
audio = audio + b'\0' * (element_size - (buffer_size % element_size)) |
|
return audio |
|
|
|
|
|
def generate_voice(api_key, text, voice): |
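    # The ElevenLabs client streams the synthesized speech back as an
    # iterator of MP3 byte chunks, which we join and save to disk.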
|
client = ElevenLabs( |
|
api_key=api_key, |
|
) |
|
audio = client.generate(text=text, voice=voice) |
|
audio = b"".join(audio) |
|
with open("output.mp3", "wb") as f: |
|
f.write(audio) |
|
return "output.mp3" |
|
|
|
|
|
html_denoise = """ |
|
<html> |
|
<head> |
|
|
<link rel="stylesheet" href="https://gradio.s3-us-west-2.amazonaws.com/2.6.2/static/bundle.css"> |
|
</head> |
|
<body> |
|
<div id="target"></div> |
|
<script src="https://gradio.s3-us-west-2.amazonaws.com/2.6.2/static/bundle.js"></script> |
|
<script |
|
type="module" |
|
src="https://gradio.s3-us-west-2.amazonaws.com/4.15.0/gradio.js" |
|
></script> |
|
<iframe |
|
src="https://g-app-center-40055665-8145-0zp6jbv.openxlab.space" |
|
frameBorder="0" |
|
width="1280" |
|
height="700" |
|
></iframe> |
|
|
|
</body> |
|
</html> |
|
""" |
|
|
|
def convert(api_key, text, tgt, voice, save_path): |
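    # Pipeline: ElevenLabs TTS renders the text, WavLM extracts content
    # features, and FreeVC re-synthesizes them conditioned on the target
    # speaker embedding. The model choice is pinned to FreeVC (24kHz) here.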
|
model = "FreeVC (24kHz)" |
|
with torch.no_grad(): |
|
|
|
wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate) |
|
wav_tgt, _ = librosa.effects.trim(wav_tgt, top_db=20) |
|
if model == "FreeVC" or model == "FreeVC (24kHz)": |
|
g_tgt = smodel.embed_utterance(wav_tgt) |
|
g_tgt = torch.from_numpy(g_tgt).unsqueeze(0).to(device) |
|
else: |
|
wav_tgt = torch.from_numpy(wav_tgt).unsqueeze(0).to(device) |
|
mel_tgt = mel_spectrogram_torch( |
|
wav_tgt, |
|
hps.data.filter_length, |
|
hps.data.n_mel_channels, |
|
hps.data.sampling_rate, |
|
hps.data.hop_length, |
|
hps.data.win_length, |
|
hps.data.mel_fmin, |
|
hps.data.mel_fmax |
|
) |
|
|
|
src = generate_voice(api_key, text, voice) |
|
wav_src, _ = librosa.load(src, sr=hps.data.sampling_rate) |
|
wav_src = torch.from_numpy(wav_src).unsqueeze(0).to(device) |
|
c = cmodel(wav_src).last_hidden_state.transpose(1, 2).to(device) |
|
|
|
if model == "FreeVC": |
|
audio = freevc.infer(c, g=g_tgt) |
|
elif model == "FreeVC-s": |
|
audio = freevc_s.infer(c, mel=mel_tgt) |
|
else: |
|
audio = freevc_24.infer(c, g=g_tgt) |
|
audio = audio[0][0].data.cpu().float().numpy() |
|
if model == "FreeVC" or model == "FreeVC-s": |
|
write(f"output/{save_path}.wav", hps.data.sampling_rate, audio) |
|
else: |
|
write(f"output/{save_path}.wav", 24000, audio) |
|
return f"output/{save_path}.wav" |
|
|
|
|
|
class subtitle:
    def __init__(self, index: int, start_time, end_time, text: str):
        self.index = int(index)
        self.start_time = start_time
        self.end_time = end_time
        self.text = text.strip()

    def normalize(self, ntype: str, fps=30):
        # Convert the raw timestamp strings into seconds (float).
        if ntype == "prcsv":
            # Premiere CSV style: h:m:s:frames (';' tolerated as a separator).
            h, m, s, fs = (self.start_time.replace(';', ':')).split(':')
            self.start_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 5)
            h, m, s, fs = (self.end_time.replace(';', ':')).split(':')
            self.end_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 5)
        elif ntype == "srt":
            # SRT style: h:m:s,ms (comma as the decimal separator).
            h, m, s = self.start_time.split(':')
            s = s.replace(',', '.')
            self.start_time = int(h) * 3600 + int(m) * 60 + round(float(s), 5)
            h, m, s = self.end_time.split(':')
            s = s.replace(',', '.')
            self.end_time = int(h) * 3600 + int(m) * 60 + round(float(s), 5)
        else:
            raise ValueError(f"Unknown timestamp type: {ntype}")

    def add_offset(self, offset=0):
        # Shift both timestamps, clamping at zero.
        self.start_time += offset
        if self.start_time < 0:
            self.start_time = 0
        self.end_time += offset
        if self.end_time < 0:
            self.end_time = 0

    def __str__(self) -> str:
        return f'id:{self.index},start:{self.start_time},end:{self.end_time},text:{self.text}'
|
|
|
def read_srt(uploaded_file):
    offset = 0
    with open(uploaded_file.name, "r", encoding="utf-8") as f:
        lines = f.readlines()
    subtitle_list = []
    indexlist = []
    filelength = len(lines)
    # A standard SubRip block looks like:
    #   1
    #   00:00:01,000 --> 00:00:03,500
    #   subtitle text (one or more lines)
    #   (blank line)
    # Collect the position of every timestamp line whose preceding line
    # is a purely numeric index.
    for i in range(filelength):
        if " --> " in lines[i]:
            is_st = True
            for char in lines[i - 1].strip().replace("\ufeff", ""):
                if char not in '0123456789':
                    is_st = False
                    break
            if is_st:
                indexlist.append(i)
    listlength = len(indexlist)
    for i in range(listlength - 1):
        st, et = lines[indexlist[i]].split(" --> ")
        idx = int(lines[indexlist[i] - 1].strip().replace("\ufeff", ""))
        text = ""
        # Text lines run from just after the timestamp up to (but excluding)
        # the blank line and index line of the next block.
        for x in range(indexlist[i] + 1, indexlist[i + 1] - 2):
            text += lines[x]
        st = subtitle(idx, st, et, text)
        st.normalize(ntype="srt")
        st.add_offset(offset=offset)
        subtitle_list.append(st)
    # Last block: its text runs to the end of the file.
    st, et = lines[indexlist[-1]].split(" --> ")
    idx = int(lines[indexlist[-1] - 1].strip().replace("\ufeff", ""))
    text = ""
    for x in range(indexlist[-1] + 1, filelength):
        text += lines[x]
    st = subtitle(idx, st, et, text)
    st.normalize(ntype="srt")
    st.add_offset(offset=offset)
    subtitle_list.append(st)
    return subtitle_list
|
|
|
import webrtcvad |
|
from pydub import AudioSegment |
|
from pydub.utils import make_chunks |
|
|
|
def vad(audio_name, out_path_name):
    audio = AudioSegment.from_file(audio_name, format="wav")
    # webrtcvad only accepts 8/16/32/48 kHz mono 16-bit PCM,
    # fed in frames of exactly 10, 20, or 30 ms.
    audio = audio.set_frame_rate(48000)
    audio = audio.set_channels(1)

    detector = webrtcvad.Vad()
    detector.set_mode(3)  # 3 = most aggressive speech/non-speech filtering

    frame_duration = 30  # ms
    frame_width = int(audio.frame_rate * frame_duration / 1000)  # samples per frame
    frames = make_chunks(audio, frame_duration)

    voiced_frames = []
    for frame in frames:
        # Drop the trailing partial frame (2 bytes per 16-bit sample).
        if len(frame.raw_data) < frame_width * 2:
            break
        if detector.is_speech(frame.raw_data, audio.frame_rate):
            voiced_frames.append(frame)

    # Concatenate the voiced frames and write the result.
    voiced_audio = sum(voiced_frames, AudioSegment.silent(duration=0))
    voiced_audio.export(f"{out_path_name}.wav", format="wav")
|
|
|
|
|
def trim_audio(intervals, input_file_path, output_file_path):
    audio = AudioSegment.from_file(input_file_path)

    for i, (start_time, end_time) in enumerate(intervals):
        # pydub slices in milliseconds.
        segment = audio[start_time * 1000:end_time * 1000]
        output_file_path_i = f"increased_{i}.wav"

        if len(segment) < 5000:
            # The speaker encoder needs several seconds of reference audio,
            # so loop short segments until comfortably over 5 s, then keep
            # only the voiced parts.
            repeat_count = (5000 // len(segment)) + 3
            longer_audio = segment * repeat_count
            print(f"Audio was shorter than 5 seconds; extended to {len(longer_audio)} ms.")
            longer_audio.export(output_file_path_i, format='wav')
            vad(output_file_path_i, f"{output_file_path}_{i}")
        else:
            print("Audio is already 5 seconds or longer.")
            segment.export(f"{output_file_path}_{i}.wav", format='wav')
|
|
|
import re |
|
|
|
def sort_key(file_name): |
|
"""Extract the last number in the file name for sorting.""" |
|
numbers = re.findall(r'\d+', file_name) |
|
if numbers: |
|
return int(numbers[-1]) |
|
return -1 |
|
|
|
|
|
def merge_audios(folder_path):
    output_file = "AI_dubbing.wav"

    files = [f for f in os.listdir(folder_path) if f.endswith('.wav')]
    # Order the clips by the trailing subtitle index in each file name.
    sorted_files = sorted(files, key=sort_key)

    merged_audio = AudioSegment.empty()
    for file in sorted_files:
        audio = AudioSegment.from_wav(os.path.join(folder_path, file))
        merged_audio += audio
        print(f"Merged: {file}")

    merged_audio.export(output_file, format="wav")
    return output_file
|
|
|
import shutil |
|
|
|
|
|
|
|
import zipfile |
|
|
|
def zip_sliced_files(directory, zip_filename, chosen_name): |
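    # Bundle every "{chosen_name}*.wav" under `directory` into one zip archive.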
|
|
|
with zipfile.ZipFile(zip_filename, 'w') as zipf: |
|
|
|
for foldername, subfolders, filenames in os.walk(directory): |
|
for filename in filenames: |
|
|
|
if filename.startswith(f"{chosen_name}") and filename.endswith(".wav"): |
|
|
|
file_path = os.path.join(foldername, filename) |
|
|
|
zipf.write(file_path, arcname=filename) |
|
print(f"Added {filename} to {zip_filename}") |
|
|
|
|
|
|
|
from pydub.effects import speedup |
|
|
|
def change_speed(input_file, speed=1.0): |
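    # pydub's speedup keeps pitch constant while shortening the audio;
    # it only works for speeds above 1.0 (hence the UI slider's 1.02 minimum).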
|
|
|
audio = AudioSegment.from_file(input_file) |
|
|
|
|
|
faster_audio = speedup(audio, playback_speed=speed) |
|
|
|
|
|
faster_audio.export("speed_changed_speech.wav", format="wav") |
|
return "speed_changed_speech.wav" |
|
|
|
|
|
|
|
def delete_sliced_files(directory, chosen_name): |
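    # Remove leftover files from previous runs whose names start with `chosen_name`.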
|
|
|
for foldername, subfolders, filenames in os.walk(directory): |
|
for filename in filenames: |
|
|
|
if filename.startswith(f"{chosen_name}"): |
|
|
|
file_path = os.path.join(foldername, filename) |
|
|
|
os.remove(file_path) |
|
print(f"Deleted {filename}") |
|
|
|
|
|
def convert_from_srt(api_key, filename, audio_full, voice, multilingual):
    # For each SRT entry: slice the matching span out of the original dub,
    # build a >=5 s voiced reference clip, then TTS the text and convert it
    # toward that reference speaker with FreeVC.
    subtitle_list = read_srt(filename)
    delete_sliced_files("./", "sliced")

    if os.path.isdir("output"):
        shutil.rmtree("output")
    if not multilingual:
        for i in subtitle_list:
            try:
                os.makedirs("output", exist_ok=True)
                trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
                print(f"Synthesizing voice clip {i.index}")
                print(f"Text: {i.text}")
                convert(api_key, i.text, f"sliced_audio_{i.index}_0.wav", voice, i.text + " " + str(i.index))
            except Exception as e:
                print(f"Skipping subtitle {i.index}: {e}")
    else:
        for i in subtitle_list:
            try:
                os.makedirs("output", exist_ok=True)
                trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
                print(f"Synthesizing voice clip {i.index}")
                # Bilingual SRT: line 1 is Chinese, line 2 is English; dub the English line.
                print(f"Text: {i.text.splitlines()[1]}")
                convert(api_key, i.text.splitlines()[1], f"sliced_audio_{i.index}_0.wav", voice, i.text.splitlines()[1] + " " + str(i.index))
            except Exception as e:
                print(f"Skipping subtitle {i.index}: {e}")
    merge_audios("output")

    zip_sliced_files("./", "reference_audio.zip", "sliced")

    return "AI_dubbing.wav", "reference_audio.zip"
|
|
|
restart_markdown = ("""
### If this page does not display properly, please click [this link](https://openxlab.org.cn/apps/detail/Kevin676/OpenAI-TTS) to wake the app. Thanks 🍻
""")
|
|
|
|
|
|
def save_file_with_new_name(original_file_path, new_file_path): |
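    # Copy the uploaded temp file to a stable local path.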
|
shutil.copyfile(original_file_path, new_file_path) |
|
|
|
|
|
def denoise(input_files):
    # Despite the name, this extracts a 44.1 kHz stereo wav track
    # from each uploaded .mp4 and zips the results.
    delete_sliced_files("./", "input_video")

    for video_file in input_files:
        name1 = video_file.name
        file_name_with_extension = name1.split('/')[-1]
        file_name1 = file_name_with_extension.split('.mp4')[0] + ".mp4"

        save_file_with_new_name(video_file.name, file_name1)

        ffmpeg.input(file_name1).output("input_video" + file_name1 + ".wav", ac=2, ar=44100).run()

    zip_sliced_files("./", "converted_audio.zip", "input_video")

    return "converted_audio.zip"
|
|
|
|
|
with gr.Blocks() as app: |
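    # Front end: tab 1 extracts audio tracks from uploaded videos,
    # tab 2 runs the SRT-driven dubbing pipeline.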
|
gr.Markdown("# <center>🌊💕🎶 11Labs TTS - SRT文件一键AI配音</center>") |
|
gr.Markdown("### <center>🌟 只需上传SRT文件和原版配音文件即可,每次一集视频AI自动配音!Developed by Kevin Wang </center>") |
|
with gr.Tab("📺视频转音频"): |
|
with gr.Row(): |
|
inp_video = gr.Files(label="您可以上传多集包含原声配音的视频", file_types=['.mp4']) |
|
btn_convert = gr.Button("视频文件转音频", variant="primary") |
|
out_audio = gr.File(label="包含所有配音音频的zip文件") |
|
|
|
btn_convert.click(denoise, [inp_video], [out_audio]) |
|
with gr.Tab("🎶AI配音"): |
|
with gr.Row(): |
|
with gr.Column(): |
|
inp0 = gr.Textbox(type='password', label='请输入您的11Labs API Key') |
|
inp1 = gr.File(file_count="single", label="请上传一集视频对应的SRT文件") |
|
inp2 = gr.Audio(label="请上传一集视频的配音文件", type="filepath") |
|
|
|
inp3 = gr.Dropdown(choices=["Rachel", "Alice", "Chris", "Adam"], label='请选择一个说话人提供基础音色', info="试听音色链接:https://elevenlabs.io/app/speech-synthesis", value='Chris') |
|
|
|
inp4 = gr.Checkbox(label="SRT文件是否为双语字幕", info="若为双语字幕,请打勾选择(SRT文件中需要先出现中文字幕,后英文字幕;中英字幕各占一行)") |
|
btn1 = gr.Button("一键开启AI配音吧💕", variant="primary") |
|
with gr.Column(): |
|
out1 = gr.Audio(label="为您生成的AI完整配音", type="filepath") |
|
out2 = gr.File(label="包含所有参考音频的zip文件") |
|
inp_speed = gr.Slider(label="设置AI配音的速度", minimum=1.02, maximum=1.5, value=1.02, step=0.01) |
|
btn2 = gr.Button("一键改变AI配音速度") |
|
out3 = gr.Audio(label="变速后的AI配音", type="filepath") |
|
|
|
btn1.click(convert_from_srt, [inp0, inp1, inp2, inp3, inp4], [out1, out2]) |
|
btn2.click(change_speed, [out1, inp_speed], [out3]) |
|
|
|
gr.Markdown("### <center>注意❗:请勿生成会对任何个人或组织造成侵害的内容,请尊重他人的著作权和知识产权。用户对此程序的任何使用行为与程序开发者无关。</center>") |
|
    gr.HTML('''
        <div class="footer">
            <p>🌊🏞️🎶 - "The river rushes ever eastward, its surging voice without end." — Gu Lin (Ming dynasty)
            </p>
        </div>
    ''')
|
|
|
app.launch(share=False, show_error=True) |