diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..c7d9f3332a950355d5a77d85000f05e6f45435ea
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..53495401dc1ad6f74aa008ee1173a81c5b83e7e9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,14 @@
+__pycache__/
+/outputs
+/speakers
+.vs
+*.npz
+*.wav
+*.npy
+.vs/
+/models
+/bark_ui_enhanced.egg-info
+/build/lib/bark
+*.pth
+*.pt
+*.zip
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..00b1196aa099cc58dbbc3bc37d09af3d1e7031e6
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,38 @@
+FROM debian:stable
+
+# Install system packages
+RUN apt update && apt install -y git pip
+
+# Create non-root user
+RUN useradd -m -d /bark bark
+
+# Run as new user
+USER bark
+WORKDIR /bark
+
+# Clone git repo
+RUN git clone https://github.com/C0untFloyd/bark-gui
+
+# Switch to git directory
+WORKDIR /bark/bark-gui
+
+# Append pip bin path to PATH
+ENV PATH=$PATH:/bark/.local/bin
+
+# Install dependencies
+RUN pip install .
+RUN pip install -r requirements.txt
+
+# Listen on all addresses, since we are in a container.
+RUN sed -i "s/server_name: ''/server_name: 0.0.0.0/g" ./config.yaml
+
+# Suggested volumes
+VOLUME /bark/bark-gui/assets/prompts/custom
+VOLUME /bark/bark-gui/models
+VOLUME /bark/.cache/huggingface/hub
+
+# Default port for web-ui
+EXPOSE 7860/tcp
+
+# Start script
+CMD python3 webui.py
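+
+# Example (a sketch; host paths are placeholders): build and run this image,
+# publishing the web-ui port and mounting the suggested volumes.
+#
+#   docker build -t bark-gui .
+#   docker run -p 7860:7860 \
+#     -v "$PWD/custom-prompts:/bark/bark-gui/assets/prompts/custom" \
+#     -v "$PWD/models:/bark/bark-gui/models" \
+#     -v "$PWD/hf-cache:/bark/.cache/huggingface/hub" \
+#     bark-gui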
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c3a9a7514ef9f36f54404adca8b0b168e305fe2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,14 @@
+---
+title: Bark Voice Cloning
+emoji: 🎶
+colorFrom: yellow
+colorTo: pink
+sdk: gradio
+sdk_version: 3.34.0
+app_file: app.py
+pinned: false
+license: mit
+duplicated_from: marker22/Bark-Voice-Cloning
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..e60c6acea113aba62d1a3f8e8186b1ae075aa989
--- /dev/null
+++ b/app.py
@@ -0,0 +1,401 @@
+import gradio as gr
+import os
+import sys
+import numpy as np
+import logging
+import torch
+import pytorch_seed
+import time
+
+from xml.sax import saxutils
+from bark.api import generate_with_settings
+from bark.api import save_as_prompt
+from util.settings import Settings
+#import nltk
+
+
+from bark import SAMPLE_RATE
+from cloning.clonevoice import clone_voice
+from bark.generation import SAMPLE_RATE, preload_models, _load_history_prompt, codec_decode
+from scipy.io.wavfile import write as write_wav
+from util.parseinput import split_and_recombine_text, build_ssml, is_ssml, create_clips_from_ssml
+from datetime import datetime
+from tqdm.auto import tqdm
+from util.helper import create_filename, add_id3_tag
+from swap_voice import swap_voice_from_audio
+from training.training_prepare import prepare_semantics_from_text, prepare_wavs_from_semantics
+from training.train import training_prepare_files, train
+
+settings = Settings('config.yaml')
+
+
+def generate_text_to_speech(text, selected_speaker, text_temp, waveform_temp, eos_prob, quick_generation, complete_settings, seed, batchcount, progress=gr.Progress(track_tqdm=True)):
+ # Chunk the text into smaller pieces then combine the generated audio
+
+ # generation settings
+ if selected_speaker == 'None':
+ selected_speaker = None
+
+ voice_name = selected_speaker
+
+ if text == None or len(text) < 1:
+ if selected_speaker == None:
+ raise gr.Error('No text entered!')
+
+ # Extract audio data from speaker if no text and speaker selected
+ voicedata = _load_history_prompt(voice_name)
+ audio_arr = codec_decode(voicedata["fine_prompt"])
+ result = create_filename(settings.output_folder_path, "None", "extract",".wav")
+ save_wav(audio_arr, result)
+ return result
+
+ if batchcount < 1:
+ batchcount = 1
+
+
+    silenceshort = np.zeros(int((float(settings.silence_sentence) / 1000.0) * SAMPLE_RATE), dtype=np.float32)  # pause between sentences (ms value from settings)
+    silencelong = np.zeros(int((float(settings.silence_speakers) / 1000.0) * SAMPLE_RATE), dtype=np.float32)  # pause between speakers (ms value from settings)
+ use_last_generation_as_history = "Use last generation as history" in complete_settings
+ save_last_generation = "Save generation as Voice" in complete_settings
+ for l in range(batchcount):
+ currentseed = seed
+ if seed != None and seed > 2**32 - 1:
+ logger.warning(f"Seed {seed} > 2**32 - 1 (max), setting to random")
+ currentseed = None
+ if currentseed == None or currentseed <= 0:
+ currentseed = np.random.default_rng().integers(1, 2**32 - 1)
+ assert(0 < currentseed and currentseed < 2**32)
+
+ progress(0, desc="Generating")
+
+ full_generation = None
+
+ all_parts = []
+ complete_text = ""
+ text = text.lstrip()
+ if is_ssml(text):
+ list_speak = create_clips_from_ssml(text)
+ prev_speaker = None
+ for i, clip in tqdm(enumerate(list_speak), total=len(list_speak)):
+ selected_speaker = clip[0]
+ # Add pause break between speakers
+ if i > 0 and selected_speaker != prev_speaker:
+ all_parts += [silencelong.copy()]
+ prev_speaker = selected_speaker
+ text = clip[1]
+ text = saxutils.unescape(text)
+ if selected_speaker == "None":
+ selected_speaker = None
+
+ print(f"\nGenerating Text ({i+1}/{len(list_speak)}) -> {selected_speaker} (Seed {currentseed}):`{text}`")
+ complete_text += text
+ with pytorch_seed.SavedRNG(currentseed):
+ audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
+ currentseed = torch.random.initial_seed()
+ if len(list_speak) > 1:
+ filename = create_filename(settings.output_folder_path, currentseed, "audioclip",".wav")
+ save_wav(audio_array, filename)
+ add_id3_tag(filename, text, selected_speaker, currentseed)
+
+ all_parts += [audio_array]
+ else:
+ texts = split_and_recombine_text(text, settings.input_text_desired_length, settings.input_text_max_length)
+ for i, text in tqdm(enumerate(texts), total=len(texts)):
+ print(f"\nGenerating Text ({i+1}/{len(texts)}) -> {selected_speaker} (Seed {currentseed}):`{text}`")
+ complete_text += text
+ if quick_generation == True:
+ with pytorch_seed.SavedRNG(currentseed):
+ audio_array = generate_with_settings(text_prompt=text, voice_name=selected_speaker, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
+ currentseed = torch.random.initial_seed()
+ else:
+ full_output = use_last_generation_as_history or save_last_generation
+ if full_output:
+ full_generation, audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob, output_full=True)
+ else:
+ audio_array = generate_with_settings(text_prompt=text, voice_name=voice_name, semantic_temp=text_temp, coarse_temp=waveform_temp, eos_p=eos_prob)
+
+ # Noticed this in the HF Demo - convert to 16bit int -32767/32767 - most used audio format
+ # audio_array = (audio_array * 32767).astype(np.int16)
+
+ if len(texts) > 1:
+ filename = create_filename(settings.output_folder_path, currentseed, "audioclip",".wav")
+ save_wav(audio_array, filename)
+ add_id3_tag(filename, text, selected_speaker, currentseed)
+
+ if quick_generation == False and (save_last_generation == True or use_last_generation_as_history == True):
+ # save to npz
+ voice_name = create_filename(settings.output_folder_path, seed, "audioclip", ".npz")
+ save_as_prompt(voice_name, full_generation)
+ if use_last_generation_as_history:
+ selected_speaker = voice_name
+
+ all_parts += [audio_array]
+ # Add short pause between sentences
+ if text[-1] in "!?.\n" and i > 1:
+ all_parts += [silenceshort.copy()]
+
+ # save & play audio
+ result = create_filename(settings.output_folder_path, currentseed, "final",".wav")
+ save_wav(np.concatenate(all_parts), result)
+ # write id3 tag with text truncated to 60 chars, as a precaution...
+ add_id3_tag(result, complete_text, selected_speaker, currentseed)
+
+ return result
+
+
+
+def save_wav(audio_array, filename):
+ write_wav(filename, SAMPLE_RATE, audio_array)
+
+def save_voice(filename, semantic_prompt, coarse_prompt, fine_prompt):
+ np.savez_compressed(
+ filename,
+ semantic_prompt=semantic_prompt,
+ coarse_prompt=coarse_prompt,
+ fine_prompt=fine_prompt
+ )
+
+
+def on_quick_gen_changed(checkbox):
+ if checkbox == False:
+ return gr.CheckboxGroup.update(visible=True)
+ return gr.CheckboxGroup.update(visible=False)
+
+def delete_output_files(checkbox_state):
+ if checkbox_state:
+ outputs_folder = os.path.join(os.getcwd(), settings.output_folder_path)
+ if os.path.exists(outputs_folder):
+ purgedir(outputs_folder)
+ return False
+
+
+# https://stackoverflow.com/a/54494779
+def purgedir(parent):
+ for root, dirs, files in os.walk(parent):
+ for item in files:
+ # Delete subordinate files
+ filespec = os.path.join(root, item)
+ os.unlink(filespec)
+ for item in dirs:
+ # Recursively perform this operation for subordinate directories
+ purgedir(os.path.join(root, item))
+
+def convert_text_to_ssml(text, selected_speaker):
+ return build_ssml(text, selected_speaker)
+
+
+def training_prepare(selected_step, num_text_generations, progress=gr.Progress(track_tqdm=True)):
+ if selected_step == prepare_training_list[0]:
+ prepare_semantics_from_text()
+ else:
+ prepare_wavs_from_semantics()
+ return None
+
+
+def start_training(save_model_epoch, max_epochs, progress=gr.Progress(track_tqdm=True)):
+ training_prepare_files("./training/data/", "./training/data/checkpoint/hubert_base_ls960.pt")
+ train("./training/data/", save_model_epoch, max_epochs)
+ return None
+
+
+
+def apply_settings(themes, input_server_name, input_server_port, input_server_public, input_desired_len, input_max_len, input_silence_break, input_silence_speaker):
+ settings.selected_theme = themes
+ settings.server_name = input_server_name
+ settings.server_port = input_server_port
+ settings.server_share = input_server_public
+ settings.input_text_desired_length = input_desired_len
+ settings.input_text_max_length = input_max_len
+ settings.silence_sentence = input_silence_break
+    settings.silence_speakers = input_silence_speaker
+ settings.save()
+
+def restart():
+ global restart_server
+ restart_server = True
+
+
+def create_version_html():
+ python_version = ".".join([str(x) for x in sys.version_info[0:3]])
+ versions_html = f"""
+python: {python_version}
+ •
+torch: {getattr(torch, '__long_version__',torch.__version__)}
+ •
+gradio: {gr.__version__}
+"""
+ return versions_html
+
+
+
+logger = logging.getLogger(__name__)
+APPTITLE = "Bark Voice Cloning UI"
+
+
+autolaunch = False
+
+if len(sys.argv) > 1:
+ autolaunch = "-autolaunch" in sys.argv
+
+
+if torch.cuda.is_available() == False:
+ os.environ['BARK_FORCE_CPU'] = 'True'
+ logger.warning("No CUDA detected, fallback to CPU!")
+
+print(f'smallmodels={os.environ.get("SUNO_USE_SMALL_MODELS", False)}')
+print(f'enablemps={os.environ.get("SUNO_ENABLE_MPS", False)}')
+print(f'offloadcpu={os.environ.get("SUNO_OFFLOAD_CPU", False)}')
+print(f'forcecpu={os.environ.get("BARK_FORCE_CPU", False)}')
+print(f'autolaunch={autolaunch}\n\n')
+
+#print("Updating nltk\n")
+#nltk.download('punkt')
+
+print("Preloading Models\n")
+preload_models()
+
+available_themes = ["Default", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", "gstaff/xkcd", "freddyaboulton/dracula_revamped", "ysharma/steampunk"]
+tokenizer_language_list = ["de","en", "pl"]
+prepare_training_list = ["Step 1: Semantics from Text","Step 2: WAV from Semantics"]
+
+seed = -1
+server_name = settings.server_name
+if len(server_name) < 1:
+ server_name = None
+server_port = settings.server_port
+if server_port <= 0:
+ server_port = None
+global run_server
+global restart_server
+
+run_server = True
+
+while run_server:
+ # Collect all existing speakers/voices in dir
+ speakers_list = []
+
+ for root, dirs, files in os.walk("./bark/assets/prompts"):
+ for file in files:
+ if file.endswith(".npz"):
+ pathpart = root.replace("./bark/assets/prompts", "")
+ name = os.path.join(pathpart, file[:-4])
+ if name.startswith("/") or name.startswith("\\"):
+ name = name[1:]
+ speakers_list.append(name)
+
+ speakers_list = sorted(speakers_list, key=lambda x: x.lower())
+ speakers_list.insert(0, 'None')
+
+ print(f'Launching {APPTITLE} Server')
+
+ # Create Gradio Blocks
+
+ with gr.Blocks(title=f"{APPTITLE}", mode=f"{APPTITLE}", theme=settings.selected_theme) as barkgui:
+        gr.Markdown("# 🐶🎶⭐ - Bark Voice Cloning")
+ gr.Markdown("### 🤗 - If you like this space, please star my [github repo](https://github.com/KevinWang676/Bark-Voice-Cloning)")
+ gr.Markdown("### 🎡 - Based on [bark-gui](https://github.com/C0untFloyd/bark-gui)")
+ gr.Markdown(f""" You can duplicate and use it with a GPU:
+ or open in [Colab](https://colab.research.google.com/github/KevinWang676/Bark-Voice-Cloning/blob/main/Bark_Voice_Cloning_UI.ipynb) for quick start 🌟
+ """)
+
+ with gr.Tab("🎙️ - Clone Voice"):
+ with gr.Row():
+ input_audio_filename = gr.Audio(label="Input audio.wav", source="upload", type="filepath")
+ #transcription_text = gr.Textbox(label="Transcription Text", lines=1, placeholder="Enter Text of your Audio Sample here...")
+ with gr.Row():
+ with gr.Column():
+ initialname = "/home/user/app/bark/assets/prompts/file"
+ output_voice = gr.Textbox(label="Filename of trained Voice (do not change the initial name)", lines=1, placeholder=initialname, value=initialname, visible=False)
+ with gr.Column():
+ tokenizerlang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1], visible=False)
+ with gr.Row():
+ clone_voice_button = gr.Button("Create Voice", variant="primary")
+ with gr.Row():
+ dummy = gr.Text(label="Progress")
+ npz_file = gr.File(label=".npz file")
+ speakers_list.insert(0, npz_file) # add prompt
+
+ with gr.Tab("🎵 - TTS"):
+ with gr.Row():
+ with gr.Column():
+ placeholder = "Enter text here."
+ input_text = gr.Textbox(label="Input Text", lines=4, placeholder=placeholder)
+ convert_to_ssml_button = gr.Button("Convert Input Text to SSML")
+ with gr.Column():
+ seedcomponent = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1)
+ batchcount = gr.Number(label="Batch count", precision=0, value=1)
+
+ with gr.Row():
+ with gr.Column():
+ gr.Markdown("[Voice Prompt Library](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c)")
+                    speaker = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (Choose “file” if you want to use the custom voice)")
+
+ with gr.Column():
+ text_temp = gr.Slider(0.1, 1.0, value=0.6, label="Generation Temperature", info="1.0 more diverse, 0.1 more conservative")
+ waveform_temp = gr.Slider(0.1, 1.0, value=0.7, label="Waveform temperature", info="1.0 more diverse, 0.1 more conservative")
+
+ with gr.Row():
+ with gr.Column():
+ quick_gen_checkbox = gr.Checkbox(label="Quick Generation", value=True)
+ settings_checkboxes = ["Use last generation as history", "Save generation as Voice"]
+ complete_settings = gr.CheckboxGroup(choices=settings_checkboxes, value=settings_checkboxes, label="Detailed Generation Settings", type="value", interactive=True, visible=False)
+ with gr.Column():
+ eos_prob = gr.Slider(0.0, 0.5, value=0.05, label="End of sentence probability")
+
+ with gr.Row():
+ with gr.Column():
+ tts_create_button = gr.Button("Generate", variant="primary")
+ with gr.Column():
+ hidden_checkbox = gr.Checkbox(visible=False)
+ button_stop_generation = gr.Button("Stop generation")
+ with gr.Row():
+ output_audio = gr.Audio(label="Generated Audio", type="filepath")
+
+ with gr.Tab("🔮 - Voice Conversion"):
+ with gr.Row():
+ swap_audio_filename = gr.Audio(label="Input audio.wav to swap voice", source="upload", type="filepath")
+ with gr.Row():
+ with gr.Column():
+ swap_tokenizer_lang = gr.Dropdown(tokenizer_language_list, label="Base Language Tokenizer", value=tokenizer_language_list[1])
+ swap_seed = gr.Number(label="Seed (default -1 = Random)", precision=0, value=-1)
+ with gr.Column():
+                    speaker_swap = gr.Dropdown(speakers_list, value=speakers_list[0], label="Voice (Choose “file” if you want to use the custom voice)")
+ swap_batchcount = gr.Number(label="Batch count", precision=0, value=1)
+ with gr.Row():
+ swap_voice_button = gr.Button("Generate", variant="primary")
+ with gr.Row():
+ output_swap = gr.Audio(label="Generated Audio", type="filepath")
+
+
+ quick_gen_checkbox.change(fn=on_quick_gen_changed, inputs=quick_gen_checkbox, outputs=complete_settings)
+ convert_to_ssml_button.click(convert_text_to_ssml, inputs=[input_text, speaker],outputs=input_text)
+ gen_click = tts_create_button.click(generate_text_to_speech, inputs=[input_text, speaker, text_temp, waveform_temp, eos_prob, quick_gen_checkbox, complete_settings, seedcomponent, batchcount],outputs=output_audio)
+ button_stop_generation.click(fn=None, inputs=None, outputs=None, cancels=[gen_click])
+
+
+
+ swap_voice_button.click(swap_voice_from_audio, inputs=[swap_audio_filename, speaker_swap, swap_tokenizer_lang, swap_seed, swap_batchcount], outputs=output_swap)
+ clone_voice_button.click(clone_voice, inputs=[input_audio_filename, output_voice], outputs=[dummy, npz_file])
+
+
+ restart_server = False
+ try:
+ barkgui.queue().launch(show_error=True)
+ except:
+ restart_server = True
+ run_server = False
+ try:
+ while restart_server == False:
+ time.sleep(1.0)
+ except (KeyboardInterrupt, OSError):
+ print("Keyboard interruption in main thread... closing server.")
+ run_server = False
+ barkgui.close()
+
+
+
+
diff --git a/bark/__init__.py b/bark/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0b17c8b44869c554931c723446c65d3903821a9
--- /dev/null
+++ b/bark/__init__.py
@@ -0,0 +1,2 @@
+from .api import generate_audio, text_to_semantic, semantic_to_waveform, save_as_prompt
+from .generation import SAMPLE_RATE, preload_models
diff --git a/bark/api.py b/bark/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a4319ceaa13798912637290f8e9e88c50d5420a
--- /dev/null
+++ b/bark/api.py
@@ -0,0 +1,158 @@
+from typing import Dict, Optional, Union
+
+import numpy as np
+
+from .generation import codec_decode, generate_coarse, generate_fine, generate_text_semantic
+
+
+def generate_with_settings(text_prompt, semantic_temp=0.6, eos_p=0.2, coarse_temp=0.7, fine_temp=0.5, voice_name=None, output_full=False):
+
+ # generation with more control
+ x_semantic = generate_text_semantic(
+ text_prompt,
+ history_prompt=voice_name,
+ temp=semantic_temp,
+ min_eos_p = eos_p,
+ use_kv_caching=True
+ )
+
+ x_coarse_gen = generate_coarse(
+ x_semantic,
+ history_prompt=voice_name,
+ temp=coarse_temp,
+ use_kv_caching=True
+ )
+ x_fine_gen = generate_fine(
+ x_coarse_gen,
+ history_prompt=voice_name,
+ temp=fine_temp,
+ )
+
+ if output_full:
+ full_generation = {
+ 'semantic_prompt': x_semantic,
+ 'coarse_prompt': x_coarse_gen,
+ 'fine_prompt': x_fine_gen
+ }
+ return full_generation, codec_decode(x_fine_gen)
+ return codec_decode(x_fine_gen)
+
+
+def text_to_semantic(
+ text: str,
+ history_prompt: Optional[Union[Dict, str]] = None,
+ temp: float = 0.7,
+ silent: bool = False,
+):
+ """Generate semantic array from text.
+
+ Args:
+ text: text to be turned into audio
+ history_prompt: history choice for audio cloning
+ temp: generation temperature (1.0 more diverse, 0.0 more conservative)
+ silent: disable progress bar
+
+ Returns:
+ numpy semantic array to be fed into `semantic_to_waveform`
+ """
+ x_semantic = generate_text_semantic(
+ text,
+ history_prompt=history_prompt,
+ temp=temp,
+ silent=silent,
+ use_kv_caching=True
+ )
+ return x_semantic
+
+
+def semantic_to_waveform(
+ semantic_tokens: np.ndarray,
+ history_prompt: Optional[Union[Dict, str]] = None,
+ temp: float = 0.7,
+ silent: bool = False,
+ output_full: bool = False,
+):
+ """Generate audio array from semantic input.
+
+ Args:
+ semantic_tokens: semantic token output from `text_to_semantic`
+ history_prompt: history choice for audio cloning
+ temp: generation temperature (1.0 more diverse, 0.0 more conservative)
+ silent: disable progress bar
+ output_full: return full generation to be used as a history prompt
+
+ Returns:
+ numpy audio array at sample frequency 24khz
+ """
+ coarse_tokens = generate_coarse(
+ semantic_tokens,
+ history_prompt=history_prompt,
+ temp=temp,
+ silent=silent,
+ use_kv_caching=True
+ )
+ fine_tokens = generate_fine(
+ coarse_tokens,
+ history_prompt=history_prompt,
+ temp=0.5,
+ )
+ audio_arr = codec_decode(fine_tokens)
+ if output_full:
+ full_generation = {
+ "semantic_prompt": semantic_tokens,
+ "coarse_prompt": coarse_tokens,
+ "fine_prompt": fine_tokens,
+ }
+ return full_generation, audio_arr
+ return audio_arr
+
+
+def save_as_prompt(filepath, full_generation):
+ assert(filepath.endswith(".npz"))
+ assert(isinstance(full_generation, dict))
+ assert("semantic_prompt" in full_generation)
+ assert("coarse_prompt" in full_generation)
+ assert("fine_prompt" in full_generation)
+ np.savez(filepath, **full_generation)
+
+
+def generate_audio(
+ text: str,
+ history_prompt: Optional[Union[Dict, str]] = None,
+ text_temp: float = 0.7,
+ waveform_temp: float = 0.7,
+ silent: bool = False,
+ output_full: bool = False,
+):
+ """Generate audio array from input text.
+
+ Args:
+ text: text to be turned into audio
+ history_prompt: history choice for audio cloning
+ text_temp: generation temperature (1.0 more diverse, 0.0 more conservative)
+ waveform_temp: generation temperature (1.0 more diverse, 0.0 more conservative)
+ silent: disable progress bar
+ output_full: return full generation to be used as a history prompt
+
+ Returns:
+ numpy audio array at sample frequency 24khz
+ """
+ semantic_tokens = text_to_semantic(
+ text,
+ history_prompt=history_prompt,
+ temp=text_temp,
+ silent=silent,
+ )
+ out = semantic_to_waveform(
+ semantic_tokens,
+ history_prompt=history_prompt,
+ temp=waveform_temp,
+ silent=silent,
+ output_full=output_full,
+ )
+ if output_full:
+ full_generation, audio_arr = out
+ return full_generation, audio_arr
+ else:
+ audio_arr = out
+ return audio_arr
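+
+
+# Example usage (a minimal sketch, assuming the default checkpoints can be
+# downloaded and the bundled "v2/en_speaker_1" prompt is present under
+# bark/assets/prompts):
+#
+#   from scipy.io.wavfile import write as write_wav
+#   from bark.api import generate_audio, save_as_prompt
+#   from bark.generation import SAMPLE_RATE, preload_models
+#
+#   preload_models()
+#   full_generation, audio = generate_audio(
+#       "Hello, this is a test.",
+#       history_prompt="v2/en_speaker_1",
+#       output_full=True,
+#   )
+#   save_as_prompt("my_voice.npz", full_generation)  # reusable history prompt
+#   write_wav("hello.wav", SAMPLE_RATE, audio)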
diff --git a/bark/assets/prompts/announcer.npz b/bark/assets/prompts/announcer.npz
new file mode 100644
index 0000000000000000000000000000000000000000..28e92eb5d6361c9322119ccc9acdc5c4d9183561
--- /dev/null
+++ b/bark/assets/prompts/announcer.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26f2d1a9e3b6fe453cf5fc8191de26cbfae6276c5b0f7c376c6a0f3c35867f83
+size 16794
diff --git a/bark/assets/prompts/v2/en_speaker_0.npz b/bark/assets/prompts/v2/en_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2ccc5a8a08be9765800958b93858b5720b594665
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:932f40d879ba8659f1ca26319ba64ea3b0647b2050fe24313bf42b0dff1fe241
+size 28100
diff --git a/bark/assets/prompts/v2/en_speaker_1.npz b/bark/assets/prompts/v2/en_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..773451dd1073938fccf73895ec049042c9609bc0
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e7f18015e1ab9b6302ded1e28a971af5306a72f193bb6c411f1948a083c8578
+size 25220
diff --git a/bark/assets/prompts/v2/en_speaker_2.npz b/bark/assets/prompts/v2/en_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8a2f9e4366031f67781097371e08a36342635ff4
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d218990680ece5f2d4fc18ea4783b016b3ae353ec413eaee2058f2d57263c9b3
+size 26236
diff --git a/bark/assets/prompts/v2/en_speaker_3.npz b/bark/assets/prompts/v2/en_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..103cfb362b1ede1b67145d4c2384c7797e8d5ea4
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92c2e2a29145c83738e9b63f082fd1c873d9422468a155463cb27f814aeaea66
+size 34980
diff --git a/bark/assets/prompts/v2/en_speaker_4.npz b/bark/assets/prompts/v2/en_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..123777ca72c8bbd4d4548b48d6e0cae91b13ab0d
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:992f91991a9a5359d72f00b09a11a550e71bb8ebfc0cfd877e39d7d41f98b714
+size 23780
diff --git a/bark/assets/prompts/v2/en_speaker_5.npz b/bark/assets/prompts/v2/en_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..dcf05979f75c24b11888ab53da02ddb118c91459
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18831c3f6014e4a2ff60ad5169b1fae06e28ed07f43f8a3616aafb84515091bf
+size 24740
diff --git a/bark/assets/prompts/v2/en_speaker_6.npz b/bark/assets/prompts/v2/en_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..090f03f886a4eba3105a0d28e7b739fb600c2cd8
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fab38dc6b6bc9226bcc414f4c5a9524bc1b2441865a586153fb620127a8faa4e
+size 25540
diff --git a/bark/assets/prompts/v2/en_speaker_7.npz b/bark/assets/prompts/v2/en_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..d5d9068bff806b7c6e1025720c5a2c1636ba8b36
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f4c4eb33f5994be8de5cfd1744ebce13da1618a6da3a7d244514178c61ef7db
+size 22716
diff --git a/bark/assets/prompts/v2/en_speaker_8.npz b/bark/assets/prompts/v2/en_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..99bdf0061c5d3377aa1aebe5759faa3f41aa27e1
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fc9f11b539588f51bbf78150a73e0365c49b2306bd72e5a22b28ef09c4fb15d
+size 23300
diff --git a/bark/assets/prompts/v2/en_speaker_9.npz b/bark/assets/prompts/v2/en_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..2439d40fb6cf3a754c4ce305d3c95e8c463690d1
--- /dev/null
+++ b/bark/assets/prompts/v2/en_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78b3ba32eb9aeb9ed34556856c40633ecc8332d1c3ae3c81e6f5015ac3eefbd5
+size 30180
diff --git a/bark/assets/prompts/v2/zh_speaker_0.npz b/bark/assets/prompts/v2/zh_speaker_0.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c0da0dd19dee7ea7045b24af8b5ef979b3967d99
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_0.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd7ac118a3e944b3f20c89f2446056a00850a630ee16318922acc6572ce80929
+size 20636
diff --git a/bark/assets/prompts/v2/zh_speaker_1.npz b/bark/assets/prompts/v2/zh_speaker_1.npz
new file mode 100644
index 0000000000000000000000000000000000000000..a41097e8fadddf15777cf8e4433602eeaee81e52
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_1.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0eacf5c862dfd3c5ac825f2ebb26f323e64309cb712e7e264cbd31c5bca3f038
+size 19836
diff --git a/bark/assets/prompts/v2/zh_speaker_2.npz b/bark/assets/prompts/v2/zh_speaker_2.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4fca832724ff2da321f2ef129e224d524075690d
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_2.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e324b47f8250e5798c314f395d4e049575e7ca369d0b6074e91c7bba70e9f26d
+size 21060
diff --git a/bark/assets/prompts/v2/zh_speaker_3.npz b/bark/assets/prompts/v2/zh_speaker_3.npz
new file mode 100644
index 0000000000000000000000000000000000000000..cd1d101a472fd9dcfa3c6d374f5099e42a002e73
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_3.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98c476abc7bf634ffb2d71d363284e7bd8c8abd5e33ec5ca21d4aa5b15730d18
+size 31300
diff --git a/bark/assets/prompts/v2/zh_speaker_4.npz b/bark/assets/prompts/v2/zh_speaker_4.npz
new file mode 100644
index 0000000000000000000000000000000000000000..8c2c94f8f02f8fc8ee490fd1174195634a28ab67
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_4.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fa8673a9895ad3302d13ac94193b5ad5da481f1cc276e6181fa895acaae133b
+size 29964
diff --git a/bark/assets/prompts/v2/zh_speaker_5.npz b/bark/assets/prompts/v2/zh_speaker_5.npz
new file mode 100644
index 0000000000000000000000000000000000000000..f2269a6bc79a059214486a5a346e2890bb355b95
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_5.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:226edfe5fabc72eeb83a13e350599bc8babe5adc2264b3cdb661fd1258dc4044
+size 17436
diff --git a/bark/assets/prompts/v2/zh_speaker_6.npz b/bark/assets/prompts/v2/zh_speaker_6.npz
new file mode 100644
index 0000000000000000000000000000000000000000..76a4891df92e084fbd3c1e7c19682ad155694efe
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_6.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:285d51fbe81cc263636b5b487fbb6633e6f3cf92c53ca9ab8e6b7f55d4b4a31d
+size 16900
diff --git a/bark/assets/prompts/v2/zh_speaker_7.npz b/bark/assets/prompts/v2/zh_speaker_7.npz
new file mode 100644
index 0000000000000000000000000000000000000000..7d4d635ffe13e4f9a21e9d5b8f514f9db4f1ebab
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_7.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0967cdb14ffa79895747b0d52df9f15bdad80d6c55b7630894345c9a7ec87c91
+size 21060
diff --git a/bark/assets/prompts/v2/zh_speaker_8.npz b/bark/assets/prompts/v2/zh_speaker_8.npz
new file mode 100644
index 0000000000000000000000000000000000000000..1ea29786a479ff5fe94822fee1e00a6484c8bec3
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_8.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c028f78530013f29ab8c0c1cf4fe2138106fbe5252951f5f36e0168056779549
+size 19300
diff --git a/bark/assets/prompts/v2/zh_speaker_9.npz b/bark/assets/prompts/v2/zh_speaker_9.npz
new file mode 100644
index 0000000000000000000000000000000000000000..caf80d75d736fd7a8c0a8febdd23d2e99449896b
--- /dev/null
+++ b/bark/assets/prompts/v2/zh_speaker_9.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6265bb827008d7af8a45a8e057fe3e91efb347d56208180a9ed990ad54e4d75e
+size 16156
diff --git a/bark/generation.py b/bark/generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad474d770235c7b665218e64699fb0b0b1b8cc3f
--- /dev/null
+++ b/bark/generation.py
@@ -0,0 +1,864 @@
+import contextlib
+import gc
+import os
+import re
+import sys
+
+from encodec import EncodecModel
+import funcy
+import logging
+import numpy as np
+from scipy.special import softmax
+import torch
+import torch.nn.functional as F
+import tqdm
+from transformers import BertTokenizer
+from huggingface_hub import hf_hub_download, hf_hub_url
+
+from .model import GPTConfig, GPT
+from .model_fine import FineGPT, FineGPTConfig
+from .settings import initenv
+
+initenv(sys.argv)
+# BARK_FORCE_CPU is set as a string (e.g. "True") by the UI, so parse it as a boolean
+global_force_cpu = str(os.environ.get("BARK_FORCE_CPU", "")).lower() in ("true", "1", "t")
+if (
+    not global_force_cpu and
+ torch.cuda.is_available() and
+ hasattr(torch.cuda, "amp") and
+ hasattr(torch.cuda.amp, "autocast") and
+ hasattr(torch.cuda, "is_bf16_supported") and
+ torch.cuda.is_bf16_supported()
+):
+ autocast = funcy.partial(torch.cuda.amp.autocast, dtype=torch.bfloat16)
+else:
+ @contextlib.contextmanager
+ def autocast():
+ yield
+
+
+# hold models in global scope to lazy load
+global models
+models = {}
+
+global models_devices
+models_devices = {}
+
+
+CONTEXT_WINDOW_SIZE = 1024
+
+SEMANTIC_RATE_HZ = 49.9
+SEMANTIC_VOCAB_SIZE = 10_000
+
+CODEBOOK_SIZE = 1024
+N_COARSE_CODEBOOKS = 2
+N_FINE_CODEBOOKS = 8
+COARSE_RATE_HZ = 75
+
+SAMPLE_RATE = 24_000
+
+
+SUPPORTED_LANGS = [
+ ("English", "en"),
+ ("German", "de"),
+ ("Spanish", "es"),
+ ("French", "fr"),
+ ("Hindi", "hi"),
+ ("Italian", "it"),
+ ("Japanese", "ja"),
+ ("Korean", "ko"),
+ ("Polish", "pl"),
+ ("Portuguese", "pt"),
+ ("Russian", "ru"),
+ ("Turkish", "tr"),
+ ("Chinese", "zh"),
+]
+
+ALLOWED_PROMPTS = {"announcer"}
+for _, lang in SUPPORTED_LANGS:
+ for prefix in ("", f"v2{os.path.sep}"):
+ for n in range(10):
+ ALLOWED_PROMPTS.add(f"{prefix}{lang}_speaker_{n}")
+
+
+logger = logging.getLogger(__name__)
+
+
+CUR_PATH = os.path.dirname(os.path.abspath(__file__))
+
+
+#default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
+#CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
+#CACHE_DIR = os.path.join(os.getcwd(), "models"
+CACHE_DIR = "./models"
+
+
+def _cast_bool_env_var(s):
+ return s.lower() in ('true', '1', 't')
+
+USE_SMALL_MODELS = _cast_bool_env_var(os.environ.get("SUNO_USE_SMALL_MODELS", "False"))
+GLOBAL_ENABLE_MPS = _cast_bool_env_var(os.environ.get("SUNO_ENABLE_MPS", "False"))
+OFFLOAD_CPU = _cast_bool_env_var(os.environ.get("SUNO_OFFLOAD_CPU", "False"))
+
+REMOTE_MODEL_PATHS = {
+ "text_small": {
+ "repo_id": "suno/bark",
+ "file_name": "text.pt",
+ },
+ "coarse_small": {
+ "repo_id": "suno/bark",
+ "file_name": "coarse.pt",
+ },
+ "fine_small": {
+ "repo_id": "suno/bark",
+ "file_name": "fine.pt",
+ },
+ "text": {
+ "repo_id": "suno/bark",
+ "file_name": "text_2.pt",
+ },
+ "coarse": {
+ "repo_id": "suno/bark",
+ "file_name": "coarse_2.pt",
+ },
+ "fine": {
+ "repo_id": "suno/bark",
+ "file_name": "fine_2.pt",
+ },
+}
+
+
+if not hasattr(torch.nn.functional, 'scaled_dot_product_attention') and torch.cuda.is_available():
+ logger.warning(
+        "torch version does not support flash attention. You will get faster" +
+        " inference speed by upgrading torch to the newest nightly version."
+ )
+
+
+def grab_best_device(use_gpu=True):
+ if torch.cuda.device_count() > 0 and use_gpu:
+ device = "cuda"
+ elif torch.backends.mps.is_available() and use_gpu and GLOBAL_ENABLE_MPS:
+ device = "mps"
+ else:
+ device = "cpu"
+ return device
+
+
+def _get_ckpt_path(model_type, use_small=False):
+ key = model_type
+ if use_small or USE_SMALL_MODELS:
+ key += "_small"
+ return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
+
+"""
+def _download(from_hf_path, file_name, destfilename):
+ os.makedirs(CACHE_DIR, exist_ok=True)
+ hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR, local_dir_use_symlinks=False)
+ # Bug in original repo? Downloaded name differs from expected...
+ if not os.path.exists(destfilename):
+ localname = os.path.join(CACHE_DIR, file_name)
+ os.rename(localname, destfilename)
+"""
+def _download(from_hf_path, file_name):
+ os.makedirs(CACHE_DIR, exist_ok=True)
+ hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
+
+
+class InferenceContext:
+ def __init__(self, benchmark=False):
+ # we can't expect inputs to be the same length, so disable benchmarking by default
+ self._chosen_cudnn_benchmark = benchmark
+ self._cudnn_benchmark = None
+
+ def __enter__(self):
+ self._cudnn_benchmark = torch.backends.cudnn.benchmark
+ torch.backends.cudnn.benchmark = self._chosen_cudnn_benchmark
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ torch.backends.cudnn.benchmark = self._cudnn_benchmark
+
+
+if torch.cuda.is_available():
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+
+
+@contextlib.contextmanager
+def _inference_mode():
+ with InferenceContext(), torch.inference_mode(), torch.no_grad(), autocast():
+ yield
+
+
+def _clear_cuda_cache():
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ torch.cuda.synchronize()
+
+
+def clean_models(model_key=None):
+ global models
+ model_keys = [model_key] if model_key is not None else models.keys()
+ for k in model_keys:
+ if k in models:
+ del models[k]
+ _clear_cuda_cache()
+ gc.collect()
+
+
+def _load_model(ckpt_path, device, use_small=False, model_type="text"):
+ if model_type == "text":
+ ConfigClass = GPTConfig
+ ModelClass = GPT
+ elif model_type == "coarse":
+ ConfigClass = GPTConfig
+ ModelClass = GPT
+ elif model_type == "fine":
+ ConfigClass = FineGPTConfig
+ ModelClass = FineGPT
+ else:
+ raise NotImplementedError()
+
+ # Force-remove Models to allow running on >12Gb GPU
+ # CF: Probably not needed anymore
+ #global models
+ #models.clear()
+ #gc.collect()
+ #torch.cuda.empty_cache()
+ # to here...
+
+ model_key = f"{model_type}_small" if use_small or USE_SMALL_MODELS else model_type
+ model_info = REMOTE_MODEL_PATHS[model_key]
+ if not os.path.exists(ckpt_path):
+ logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
+ ## added next two lines to make it super clear which model is being downloaded
+ remote_filename = hf_hub_url(model_info["repo_id"], model_info["file_name"])
+ print(f"Downloading {model_key} {model_info['repo_id']} remote model file {remote_filename} {model_info['file_name']} to {CACHE_DIR}")
+ _download(model_info["repo_id"], model_info["file_name"])
+ # add next line to make it super clear which model is being loaded
+ print(f"Loading {model_key} model from {ckpt_path} to {device}") # added
+ checkpoint = torch.load(ckpt_path, map_location=device)
+    # backwards compatibility: older checkpoints store a single "vocab_size" entry
+ model_args = checkpoint["model_args"]
+ if "input_vocab_size" not in model_args:
+ model_args["input_vocab_size"] = model_args["vocab_size"]
+ model_args["output_vocab_size"] = model_args["vocab_size"]
+ del model_args["vocab_size"]
+ gptconf = ConfigClass(**checkpoint["model_args"])
+ model = ModelClass(gptconf)
+ state_dict = checkpoint["model"]
+ # fixup checkpoint
+ unwanted_prefix = "_orig_mod."
+ for k, v in list(state_dict.items()):
+ if k.startswith(unwanted_prefix):
+ state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k)
+ extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
+ extra_keys = set([k for k in extra_keys if not k.endswith(".attn.bias")])
+ missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
+ missing_keys = set([k for k in missing_keys if not k.endswith(".attn.bias")])
+ if len(extra_keys) != 0:
+ raise ValueError(f"extra keys found: {extra_keys}")
+ if len(missing_keys) != 0:
+ raise ValueError(f"missing keys: {missing_keys}")
+ model.load_state_dict(state_dict, strict=False)
+ n_params = model.get_num_params()
+ val_loss = checkpoint["best_val_loss"].item()
+ logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss")
+ model.eval()
+ model.to(device)
+ del checkpoint, state_dict
+ _clear_cuda_cache()
+ if model_type == "text":
+ tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
+ return {
+ "model": model,
+ "tokenizer": tokenizer,
+ }
+ return model
+
+
+def _load_codec_model(device):
+ model = EncodecModel.encodec_model_24khz()
+ model.set_target_bandwidth(6.0)
+ model.eval()
+ model.to(device)
+ _clear_cuda_cache()
+ return model
+
+
+def load_model(use_gpu=True, use_small=False, force_reload=False, model_type="text"):
+ _load_model_f = funcy.partial(_load_model, model_type=model_type, use_small=use_small)
+ if model_type not in ("text", "coarse", "fine"):
+ raise NotImplementedError()
+ global models
+ global models_devices
+ device = grab_best_device(use_gpu=use_gpu)
+ model_key = f"{model_type}"
+ if OFFLOAD_CPU:
+ models_devices[model_key] = device
+ device = "cpu"
+ if model_key not in models or force_reload:
+ ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
+ clean_models(model_key=model_key)
+ model = _load_model_f(ckpt_path, device)
+ models[model_key] = model
+ if model_type == "text":
+ models[model_key]["model"].to(device)
+ else:
+ models[model_key].to(device)
+ return models[model_key]
+
+
+def load_codec_model(use_gpu=True, force_reload=False):
+ global models
+ global models_devices
+ device = grab_best_device(use_gpu=use_gpu)
+ if device == "mps":
+ # encodec doesn't support mps
+ device = "cpu"
+ model_key = "codec"
+ if OFFLOAD_CPU:
+ models_devices[model_key] = device
+ device = "cpu"
+ if model_key not in models or force_reload:
+ clean_models(model_key=model_key)
+ model = _load_codec_model(device)
+ models[model_key] = model
+ models[model_key].to(device)
+ return models[model_key]
+
+
+def preload_models(
+ text_use_gpu=True,
+ text_use_small=False,
+ coarse_use_gpu=True,
+ coarse_use_small=False,
+ fine_use_gpu=True,
+ fine_use_small=False,
+ codec_use_gpu=True,
+ force_reload=False
+):
+ """Load all the necessary models for the pipeline."""
+ if grab_best_device() == "cpu" and (
+ text_use_gpu or coarse_use_gpu or fine_use_gpu or codec_use_gpu
+ ):
+ logger.warning("No GPU being used. Careful, inference might be very slow!")
+ _ = load_model(
+ model_type="text", use_gpu=text_use_gpu, use_small=text_use_small, force_reload=force_reload
+ )
+ _ = load_model(
+ model_type="coarse",
+ use_gpu=coarse_use_gpu,
+ use_small=coarse_use_small,
+ force_reload=force_reload,
+ )
+ _ = load_model(
+ model_type="fine", use_gpu=fine_use_gpu, use_small=fine_use_small, force_reload=force_reload
+ )
+ _ = load_codec_model(use_gpu=codec_use_gpu, force_reload=force_reload)
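+
+# Example (a sketch): preload only the small checkpoints and keep every model
+# on CPU, e.g. on a machine without a usable GPU:
+#
+#   preload_models(
+#       text_use_gpu=False, text_use_small=True,
+#       coarse_use_gpu=False, coarse_use_small=True,
+#       fine_use_gpu=False, fine_use_small=True,
+#       codec_use_gpu=False,
+#   )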
+
+
+####
+# Generation Functionality
+####
+
+
+def _tokenize(tokenizer, text):
+ return tokenizer.encode(text, add_special_tokens=False)
+
+
+def _detokenize(tokenizer, enc_text):
+ return tokenizer.decode(enc_text)
+
+
+def _normalize_whitespace(text):
+ return re.sub(r"\s+", " ", text).strip()
+
+
+TEXT_ENCODING_OFFSET = 10_048
+SEMANTIC_PAD_TOKEN = 10_000
+TEXT_PAD_TOKEN = 129_595
+SEMANTIC_INFER_TOKEN = 129_599
+
+
+def _load_history_prompt(history_prompt_input):
+ if isinstance(history_prompt_input, str) and history_prompt_input.endswith(".npz"):
+ history_prompt = np.load(history_prompt_input)
+ elif isinstance(history_prompt_input, str):
+ # make sure this works on non-ubuntu
+ history_prompt_input = os.path.join(*history_prompt_input.split("/"))
+# if history_prompt_input not in ALLOWED_PROMPTS:
+# raise ValueError("history prompt not found")
+ history_prompt = np.load(
+ os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt_input}.npz")
+ )
+ elif isinstance(history_prompt_input, dict):
+ assert("semantic_prompt" in history_prompt_input)
+ assert("coarse_prompt" in history_prompt_input)
+ assert("fine_prompt" in history_prompt_input)
+ history_prompt = history_prompt_input
+ else:
+ raise ValueError("history prompt format unrecognized")
+ return history_prompt
+
+
+def generate_text_semantic(
+ text,
+ history_prompt=None,
+ temp=0.7,
+ top_k=None,
+ top_p=None,
+ silent=False,
+ min_eos_p=0.2,
+ max_gen_duration_s=None,
+ allow_early_stop=True,
+ use_kv_caching=False,
+):
+ """Generate semantic tokens from text."""
+ assert isinstance(text, str)
+ text = _normalize_whitespace(text)
+ assert len(text.strip()) > 0
+ if history_prompt is not None:
+ history_prompt = _load_history_prompt(history_prompt)
+ semantic_history = history_prompt["semantic_prompt"]
+ assert (
+ isinstance(semantic_history, np.ndarray)
+ and len(semantic_history.shape) == 1
+ and len(semantic_history) > 0
+ and semantic_history.min() >= 0
+ and semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
+ )
+ else:
+ semantic_history = None
+ # load models if not yet exist
+ global models
+ global models_devices
+ if "text" not in models:
+ preload_models()
+ model_container = models["text"]
+ model = model_container["model"]
+ tokenizer = model_container["tokenizer"]
+ encoded_text = np.array(_tokenize(tokenizer, text)) + TEXT_ENCODING_OFFSET
+ if OFFLOAD_CPU:
+ model.to(models_devices["text"])
+ device = next(model.parameters()).device
+ if len(encoded_text) > 256:
+ p = round((len(encoded_text) - 256) / len(encoded_text) * 100, 1)
+        logger.warning(f"warning, text too long, lopping off last {p}%")
+ encoded_text = encoded_text[:256]
+ encoded_text = np.pad(
+ encoded_text,
+ (0, 256 - len(encoded_text)),
+ constant_values=TEXT_PAD_TOKEN,
+ mode="constant",
+ )
+ if semantic_history is not None:
+ semantic_history = semantic_history.astype(np.int64)
+ # lop off if history is too long, pad if needed
+ semantic_history = semantic_history[-256:]
+ semantic_history = np.pad(
+ semantic_history,
+ (0, 256 - len(semantic_history)),
+ constant_values=SEMANTIC_PAD_TOKEN,
+ mode="constant",
+ )
+ else:
+ semantic_history = np.array([SEMANTIC_PAD_TOKEN] * 256)
+ x = torch.from_numpy(
+ np.hstack([
+ encoded_text, semantic_history, np.array([SEMANTIC_INFER_TOKEN])
+ ]).astype(np.int64)
+ )[None]
+ assert x.shape[1] == 256 + 256 + 1
+ with _inference_mode():
+ x = x.to(device)
+ n_tot_steps = 768
+ # custom tqdm updates since we don't know when eos will occur
+ pbar = tqdm.tqdm(disable=silent, total=100)
+ pbar_state = 0
+ tot_generated_duration_s = 0
+ kv_cache = None
+ for n in range(n_tot_steps):
+ if use_kv_caching and kv_cache is not None:
+ x_input = x[:, [-1]]
+ else:
+ x_input = x
+ logits, kv_cache = model(
+ x_input, merge_context=True, use_cache=use_kv_caching, past_kv=kv_cache
+ )
+ relevant_logits = logits[0, 0, :SEMANTIC_VOCAB_SIZE]
+ if allow_early_stop:
+ relevant_logits = torch.hstack(
+ (relevant_logits, logits[0, 0, [SEMANTIC_PAD_TOKEN]]) # eos
+ )
+ if top_p is not None:
+ # faster to convert to numpy
+ original_device = relevant_logits.device
+ relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
+ sorted_indices = np.argsort(relevant_logits)[::-1]
+ sorted_logits = relevant_logits[sorted_indices]
+ cumulative_probs = np.cumsum(softmax(sorted_logits))
+ sorted_indices_to_remove = cumulative_probs > top_p
+ sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
+ sorted_indices_to_remove[0] = False
+ relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
+ relevant_logits = torch.from_numpy(relevant_logits)
+ relevant_logits = relevant_logits.to(original_device)
+ if top_k is not None:
+ v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
+ relevant_logits[relevant_logits < v[-1]] = -float("Inf")
+ probs = F.softmax(relevant_logits / temp, dim=-1)
+ # multinomial bugged on mps: shuttle to cpu if necessary
+ inf_device = probs.device
+ if probs.device.type == "mps":
+ probs = probs.to("cpu")
+ item_next = torch.multinomial(probs, num_samples=1)
+ probs = probs.to(inf_device)
+ item_next = item_next.to(inf_device)
+ if allow_early_stop and (
+ item_next == SEMANTIC_VOCAB_SIZE
+ or (min_eos_p is not None and probs[-1] >= min_eos_p)
+ ):
+ # eos found, so break
+ pbar.update(100 - pbar_state)
+ break
+ x = torch.cat((x, item_next[None]), dim=1)
+ tot_generated_duration_s += 1 / SEMANTIC_RATE_HZ
+ if max_gen_duration_s is not None and tot_generated_duration_s > max_gen_duration_s:
+ pbar.update(100 - pbar_state)
+ break
+ if n == n_tot_steps - 1:
+ pbar.update(100 - pbar_state)
+ break
+ del logits, relevant_logits, probs, item_next
+ req_pbar_state = np.min([100, int(round(100 * n / n_tot_steps))])
+ if req_pbar_state > pbar_state:
+ pbar.update(req_pbar_state - pbar_state)
+ pbar_state = req_pbar_state
+ pbar.close()
+ out = x.detach().cpu().numpy().squeeze()[256 + 256 + 1 :]
+ if OFFLOAD_CPU:
+ model.to("cpu")
+ assert all(0 <= out) and all(out < SEMANTIC_VOCAB_SIZE)
+ _clear_cuda_cache()
+ return out
+
+
+def _flatten_codebooks(arr, offset_size=CODEBOOK_SIZE):
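+    """Interleave an (n_codebooks, T) code array column-major into one 1-D
+    sequence, offsetting codebook n by n * offset_size so token ids from
+    different codebooks do not collide."""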
+ assert len(arr.shape) == 2
+ arr = arr.copy()
+ if offset_size is not None:
+ for n in range(1, arr.shape[0]):
+ arr[n, :] += offset_size * n
+ flat_arr = arr.ravel("F")
+ return flat_arr
+
+
+COARSE_SEMANTIC_PAD_TOKEN = 12_048
+COARSE_INFER_TOKEN = 12_050
+
+
+def generate_coarse(
+ x_semantic,
+ history_prompt=None,
+ temp=0.7,
+ top_k=None,
+ top_p=None,
+ silent=False,
+ max_coarse_history=630, # min 60 (faster), max 630 (more context)
+ sliding_window_len=60,
+ use_kv_caching=False,
+):
+ """Generate coarse audio codes from semantic tokens."""
+# CF: Commented out because it breaks swap voice more than once
+# assert (
+# isinstance(x_semantic, np.ndarray)
+# and len(x_semantic.shape) == 1
+# and len(x_semantic) > 0
+# and x_semantic.min() >= 0
+# and x_semantic.max() <= SEMANTIC_VOCAB_SIZE - 1
+# )
+ assert 60 <= max_coarse_history <= 630
+ assert max_coarse_history + sliding_window_len <= 1024 - 256
+ semantic_to_coarse_ratio = COARSE_RATE_HZ / SEMANTIC_RATE_HZ * N_COARSE_CODEBOOKS
+ max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))
+ if history_prompt is not None:
+ history_prompt = _load_history_prompt(history_prompt)
+ x_semantic_history = history_prompt["semantic_prompt"]
+ x_coarse_history = history_prompt["coarse_prompt"]
+ assert (
+ isinstance(x_semantic_history, np.ndarray)
+ and len(x_semantic_history.shape) == 1
+ and len(x_semantic_history) > 0
+ and x_semantic_history.min() >= 0
+ and x_semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
+ and isinstance(x_coarse_history, np.ndarray)
+ and len(x_coarse_history.shape) == 2
+ and x_coarse_history.shape[0] == N_COARSE_CODEBOOKS
+ and x_coarse_history.shape[-1] >= 0
+ and x_coarse_history.min() >= 0
+ and x_coarse_history.max() <= CODEBOOK_SIZE - 1
+ #and (
+ # round(x_coarse_history.shape[-1] / len(x_semantic_history), 1)
+ # == round(semantic_to_coarse_ratio / N_COARSE_CODEBOOKS, 1)
+ #)
+ )
+ x_coarse_history = _flatten_codebooks(x_coarse_history) + SEMANTIC_VOCAB_SIZE
+ # trim histories correctly
+ n_semantic_hist_provided = np.min(
+ [
+ max_semantic_history,
+ len(x_semantic_history) - len(x_semantic_history) % 2,
+ int(np.floor(len(x_coarse_history) / semantic_to_coarse_ratio)),
+ ]
+ )
+ n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))
+ x_semantic_history = x_semantic_history[-n_semantic_hist_provided:].astype(np.int32)
+ x_coarse_history = x_coarse_history[-n_coarse_hist_provided:].astype(np.int32)
+ # TODO: bit of a hack for time alignment (sounds better)
+ x_coarse_history = x_coarse_history[:-2]
+ else:
+ x_semantic_history = np.array([], dtype=np.int32)
+ x_coarse_history = np.array([], dtype=np.int32)
+ # load models if not yet exist
+ global models
+ global models_devices
+ if "coarse" not in models:
+ preload_models()
+ model = models["coarse"]
+ if OFFLOAD_CPU:
+ model.to(models_devices["coarse"])
+ device = next(model.parameters()).device
+ # start loop
+ n_steps = int(
+ round(
+ np.floor(len(x_semantic) * semantic_to_coarse_ratio / N_COARSE_CODEBOOKS)
+ * N_COARSE_CODEBOOKS
+ )
+ )
+ assert n_steps > 0 and n_steps % N_COARSE_CODEBOOKS == 0
+ x_semantic = np.hstack([x_semantic_history, x_semantic]).astype(np.int32)
+ x_coarse = x_coarse_history.astype(np.int32)
+ base_semantic_idx = len(x_semantic_history)
+ with _inference_mode():
+ x_semantic_in = torch.from_numpy(x_semantic)[None].to(device)
+ x_coarse_in = torch.from_numpy(x_coarse)[None].to(device)
+ n_window_steps = int(np.ceil(n_steps / sliding_window_len))
+ n_step = 0
+ for _ in tqdm.tqdm(range(n_window_steps), total=n_window_steps, disable=silent):
+ semantic_idx = base_semantic_idx + int(round(n_step / semantic_to_coarse_ratio))
+ # pad from right side
+ x_in = x_semantic_in[:, np.max([0, semantic_idx - max_semantic_history]) :]
+ x_in = x_in[:, :256]
+ x_in = F.pad(
+ x_in,
+ (0, 256 - x_in.shape[-1]),
+ "constant",
+ COARSE_SEMANTIC_PAD_TOKEN,
+ )
+ x_in = torch.hstack(
+ [
+ x_in,
+ torch.tensor([COARSE_INFER_TOKEN])[None].to(device),
+ x_coarse_in[:, -max_coarse_history:],
+ ]
+ )
+ kv_cache = None
+ for _ in range(sliding_window_len):
+ if n_step >= n_steps:
+ continue
+ is_major_step = n_step % N_COARSE_CODEBOOKS == 0
+
+ if use_kv_caching and kv_cache is not None:
+ x_input = x_in[:, [-1]]
+ else:
+ x_input = x_in
+
+ logits, kv_cache = model(x_input, use_cache=use_kv_caching, past_kv=kv_cache)
+ logit_start_idx = (
+ SEMANTIC_VOCAB_SIZE + (1 - int(is_major_step)) * CODEBOOK_SIZE
+ )
+ logit_end_idx = (
+ SEMANTIC_VOCAB_SIZE + (2 - int(is_major_step)) * CODEBOOK_SIZE
+ )
+ relevant_logits = logits[0, 0, logit_start_idx:logit_end_idx]
+ if top_p is not None:
+ # faster to convert to numpy
+ original_device = relevant_logits.device
+ relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
+ sorted_indices = np.argsort(relevant_logits)[::-1]
+ sorted_logits = relevant_logits[sorted_indices]
+ cumulative_probs = np.cumsum(softmax(sorted_logits))
+ sorted_indices_to_remove = cumulative_probs > top_p
+ sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
+ sorted_indices_to_remove[0] = False
+ relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
+ relevant_logits = torch.from_numpy(relevant_logits)
+ relevant_logits = relevant_logits.to(original_device)
+ if top_k is not None:
+ v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
+ relevant_logits[relevant_logits < v[-1]] = -float("Inf")
+ probs = F.softmax(relevant_logits / temp, dim=-1)
+ # multinomial bugged on mps: shuttle to cpu if necessary
+ inf_device = probs.device
+ if probs.device.type == "mps":
+ probs = probs.to("cpu")
+ item_next = torch.multinomial(probs, num_samples=1)
+ probs = probs.to(inf_device)
+ item_next = item_next.to(inf_device)
+ item_next += logit_start_idx
+ x_coarse_in = torch.cat((x_coarse_in, item_next[None]), dim=1)
+ x_in = torch.cat((x_in, item_next[None]), dim=1)
+ del logits, relevant_logits, probs, item_next
+ n_step += 1
+ del x_in
+ del x_semantic_in
+ if OFFLOAD_CPU:
+ model.to("cpu")
+ gen_coarse_arr = x_coarse_in.detach().cpu().numpy().squeeze()[len(x_coarse_history) :]
+ del x_coarse_in
+ assert len(gen_coarse_arr) == n_steps
+ gen_coarse_audio_arr = gen_coarse_arr.reshape(-1, N_COARSE_CODEBOOKS).T - SEMANTIC_VOCAB_SIZE
+ for n in range(1, N_COARSE_CODEBOOKS):
+ gen_coarse_audio_arr[n, :] -= n * CODEBOOK_SIZE
+ _clear_cuda_cache()
+ return gen_coarse_audio_arr
+
+
+def generate_fine(
+ x_coarse_gen,
+ history_prompt=None,
+ temp=0.5,
+ silent=True,
+):
+ """Generate full audio codes from coarse audio codes."""
+ assert (
+ isinstance(x_coarse_gen, np.ndarray)
+ and len(x_coarse_gen.shape) == 2
+ and 1 <= x_coarse_gen.shape[0] <= N_FINE_CODEBOOKS - 1
+ and x_coarse_gen.shape[1] > 0
+ and x_coarse_gen.min() >= 0
+ and x_coarse_gen.max() <= CODEBOOK_SIZE - 1
+ )
+ if history_prompt is not None:
+ history_prompt = _load_history_prompt(history_prompt)
+ x_fine_history = history_prompt["fine_prompt"]
+ assert (
+ isinstance(x_fine_history, np.ndarray)
+ and len(x_fine_history.shape) == 2
+ and x_fine_history.shape[0] == N_FINE_CODEBOOKS
+ and x_fine_history.shape[1] >= 0
+ and x_fine_history.min() >= 0
+ and x_fine_history.max() <= CODEBOOK_SIZE - 1
+ )
+ else:
+ x_fine_history = None
+ n_coarse = x_coarse_gen.shape[0]
+ # load models if not yet exist
+ global models
+ global models_devices
+ if "fine" not in models:
+ preload_models()
+ model = models["fine"]
+ if OFFLOAD_CPU:
+ model.to(models_devices["fine"])
+ device = next(model.parameters()).device
+ # make input arr
+ in_arr = np.vstack(
+ [
+ x_coarse_gen,
+ np.zeros((N_FINE_CODEBOOKS - n_coarse, x_coarse_gen.shape[1]))
+ + CODEBOOK_SIZE, # padding
+ ]
+ ).astype(np.int32)
+ # prepend history if available (max 512)
+ if x_fine_history is not None:
+ x_fine_history = x_fine_history.astype(np.int32)
+ in_arr = np.hstack(
+ [
+ x_fine_history[:, -512:].astype(np.int32),
+ in_arr,
+ ]
+ )
+ n_history = x_fine_history[:, -512:].shape[1]
+ else:
+ n_history = 0
+ n_remove_from_end = 0
+ # need to pad if too short (since non-causal model)
+ if in_arr.shape[1] < 1024:
+ n_remove_from_end = 1024 - in_arr.shape[1]
+ in_arr = np.hstack(
+ [
+ in_arr,
+ np.zeros((N_FINE_CODEBOOKS, n_remove_from_end), dtype=np.int32) + CODEBOOK_SIZE,
+ ]
+ )
+ # we can be lazy about fractional loop and just keep overwriting codebooks
+ n_loops = np.max([0, int(np.ceil((x_coarse_gen.shape[1] - (1024 - n_history)) / 512))]) + 1
+ with _inference_mode():
+ in_arr = torch.tensor(in_arr.T).to(device)
+ for n in tqdm.tqdm(range(n_loops), disable=silent):
+ start_idx = np.min([n * 512, in_arr.shape[0] - 1024])
+ start_fill_idx = np.min([n_history + n * 512, in_arr.shape[0] - 512])
+ rel_start_fill_idx = start_fill_idx - start_idx
+ in_buffer = in_arr[start_idx : start_idx + 1024, :][None]
+ for nn in range(n_coarse, N_FINE_CODEBOOKS):
+ logits = model(nn, in_buffer)
+ if temp is None:
+ relevant_logits = logits[0, rel_start_fill_idx:, :CODEBOOK_SIZE]
+ codebook_preds = torch.argmax(relevant_logits, -1)
+ else:
+ relevant_logits = logits[0, :, :CODEBOOK_SIZE] / temp
+ probs = F.softmax(relevant_logits, dim=-1)
+ # multinomial bugged on mps: shuttle to cpu if necessary
+ inf_device = probs.device
+ if probs.device.type == "mps":
+ probs = probs.to("cpu")
+ codebook_preds = torch.hstack(
+ [
+ torch.multinomial(probs[nnn], num_samples=1).to(inf_device)
+ for nnn in range(rel_start_fill_idx, 1024)
+ ]
+ )
+ in_buffer[0, rel_start_fill_idx:, nn] = codebook_preds
+ del logits, codebook_preds
+ # transfer over info into model_in and convert to numpy
+ for nn in range(n_coarse, N_FINE_CODEBOOKS):
+ in_arr[
+ start_fill_idx : start_fill_idx + (1024 - rel_start_fill_idx), nn
+ ] = in_buffer[0, rel_start_fill_idx:, nn]
+ del in_buffer
+ gen_fine_arr = in_arr.detach().cpu().numpy().squeeze().T
+ del in_arr
+ if OFFLOAD_CPU:
+ model.to("cpu")
+ gen_fine_arr = gen_fine_arr[:, n_history:]
+ if n_remove_from_end > 0:
+ gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end]
+ assert gen_fine_arr.shape[-1] == x_coarse_gen.shape[-1]
+ _clear_cuda_cache()
+ return gen_fine_arr
+
+
+def codec_decode(fine_tokens):
+ """Turn quantized audio codes into audio array using encodec."""
+ # load models if not yet exist
+ global models
+ global models_devices
+ if "codec" not in models:
+ preload_models()
+ model = models["codec"]
+ if OFFLOAD_CPU:
+ model.to(models_devices["codec"])
+ device = next(model.parameters()).device
+ arr = torch.from_numpy(fine_tokens)[None]
+ arr = arr.to(device)
+ arr = arr.transpose(0, 1)
+ emb = model.quantizer.decode(arr)
+ out = model.decoder(emb)
+ audio_arr = out.detach().cpu().numpy().squeeze()
+ del arr, emb, out
+ if OFFLOAD_CPU:
+ model.to("cpu")
+ return audio_arr
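+
+
+# The block below is a hedged usage sketch, not part of the upstream Bark API: it simply chains the
+# generation stages in this module (semantic -> coarse -> fine -> waveform). Running it loads/downloads
+# the full Bark checkpoints; the text, temperatures and output filename are illustrative only.
+if __name__ == "__main__":
+    from scipy.io.wavfile import write as write_wav
+
+    semantic_tokens = generate_text_semantic("Hello, this is a short Bark smoke test.")
+    coarse_tokens = generate_coarse(semantic_tokens, temp=0.7)
+    fine_tokens = generate_fine(coarse_tokens, temp=0.5)
+    audio_array = codec_decode(fine_tokens)
+    write_wav("bark_smoke_test.wav", SAMPLE_RATE, audio_array)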
diff --git a/bark/hubert/__init__.py b/bark/hubert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bark/hubert/customtokenizer.py b/bark/hubert/customtokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0cbdbf30285c9b707aa5e11eb63dff0902bbb96
--- /dev/null
+++ b/bark/hubert/customtokenizer.py
@@ -0,0 +1,195 @@
+"""
+Custom tokenizer model.
+Author: https://www.github.com/gitmylo/
+License: MIT
+"""
+
+import json
+import os.path
+from zipfile import ZipFile
+
+import numpy
+import torch
+from torch import nn, optim
+from torch.serialization import MAP_LOCATION
+from tqdm.auto import tqdm
+
+
+class CustomTokenizer(nn.Module):
+ def __init__(self, hidden_size=1024, input_size=768, output_size=10000, version=0):
+ super(CustomTokenizer, self).__init__()
+ next_size = input_size
+ if version == 0:
+ self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True)
+ next_size = hidden_size
+ if version == 1:
+ self.lstm = nn.LSTM(input_size, hidden_size, 2, batch_first=True)
+ self.intermediate = nn.Linear(hidden_size, 4096)
+ next_size = 4096
+
+ self.fc = nn.Linear(next_size, output_size)
+ self.softmax = nn.LogSoftmax(dim=1)
+ self.optimizer: optim.Optimizer = None
+ self.lossfunc = nn.CrossEntropyLoss()
+ self.input_size = input_size
+ self.hidden_size = hidden_size
+ self.output_size = output_size
+ self.version = version
+
+ def forward(self, x):
+ x, _ = self.lstm(x)
+ if self.version == 1:
+ x = self.intermediate(x)
+ x = self.fc(x)
+ x = self.softmax(x)
+ return x
+
+ @torch.no_grad()
+ def get_token(self, x):
+ """
+        Converts a batch of HuBERT feature vectors into discrete semantic tokens.
+ :param x: An array with shape (N, input_size) where N is a whole number greater or equal to 1, and input_size is the input size used when creating the model.
+ :return: An array with shape (N,) where N is the same as N from the input. Every number in the array is a whole number in range 0...output_size - 1 where output_size is the output size used when creating the model.
+ """
+ return torch.argmax(self(x), dim=1)
+
+ def prepare_training(self):
+ self.optimizer = optim.Adam(self.parameters(), 0.001)
+
+ def train_step(self, x_train, y_train, log_loss=False):
+ # y_train = y_train[:-1]
+ # y_train = y_train[1:]
+
+ optimizer = self.optimizer
+ lossfunc = self.lossfunc
+ # Zero the gradients
+ self.zero_grad()
+
+ # Forward pass
+ y_pred = self(x_train)
+
+ y_train_len = len(y_train)
+ y_pred_len = y_pred.shape[0]
+
+ if y_train_len > y_pred_len:
+ diff = y_train_len - y_pred_len
+ y_train = y_train[diff:]
+ elif y_train_len < y_pred_len:
+ diff = y_pred_len - y_train_len
+ y_pred = y_pred[:-diff, :]
+
+ y_train_hot = torch.zeros(len(y_train), self.output_size)
+ y_train_hot[range(len(y_train)), y_train] = 1
+ y_train_hot = y_train_hot.to('cuda')
+
+ # Calculate the loss
+ loss = lossfunc(y_pred, y_train_hot)
+
+ # Print loss
+ if log_loss:
+ print('Loss', loss.item())
+
+ # Backward pass
+ loss.backward()
+
+ # Update the weights
+ optimizer.step()
+
+ def save(self, path):
+ info_path = '.'.join(os.path.basename(path).split('.')[:-1]) + '/.info'
+ torch.save(self.state_dict(), path)
+ data_from_model = Data(self.input_size, self.hidden_size, self.output_size, self.version)
+ with ZipFile(path, 'a') as model_zip:
+ model_zip.writestr(info_path, data_from_model.save())
+
+ @staticmethod
+ def load_from_checkpoint(path, map_location: MAP_LOCATION = None):
+ old = True
+ with ZipFile(path) as model_zip:
+ filesMatch = [file for file in model_zip.namelist() if file.endswith('/.info')]
+ file = filesMatch[0] if filesMatch else None
+ if file:
+ old = False
+ print(f"Loading Custom Hubert Tokenizer {path}")
+ data_from_model = Data.load(model_zip.read(file).decode('utf-8'))
+ if old:
+ model = CustomTokenizer()
+ else:
+ model = CustomTokenizer(data_from_model.hidden_size, data_from_model.input_size, data_from_model.output_size, data_from_model.version)
+        model.load_state_dict(torch.load(path, map_location=map_location))
+ if map_location:
+ model = model.to(map_location)
+ return model
+
+
+
+class Data:
+ input_size: int
+ hidden_size: int
+ output_size: int
+ version: int
+
+ def __init__(self, input_size=768, hidden_size=1024, output_size=10000, version=0):
+ self.input_size = input_size
+ self.hidden_size = hidden_size
+ self.output_size = output_size
+ self.version = version
+
+ @staticmethod
+ def load(string):
+ data = json.loads(string)
+ return Data(data['input_size'], data['hidden_size'], data['output_size'], data['version'])
+
+ def save(self):
+ data = {
+ 'input_size': self.input_size,
+ 'hidden_size': self.hidden_size,
+ 'output_size': self.output_size,
+ 'version': self.version,
+ }
+ return json.dumps(data)
+
+
+def auto_train(data_path, save_path='model.pth', load_model: str | None = None, save_epochs=1, max_epochs=14):
+ data_x, data_y = [], []
+
+ if load_model and os.path.isfile(load_model):
+ print('Loading model from', load_model)
+ model_training = CustomTokenizer.load_from_checkpoint(load_model, 'cuda')
+ else:
+ print('Creating new model.')
+        model_training = CustomTokenizer(version=1).to('cuda')  # version 1 adds an intermediate linear layer on top of the LSTM
+ save_path = os.path.join(data_path, save_path)
+ base_save_path = '.'.join(save_path.split('.')[:-1])
+
+ sem_string = '_semantic.npy'
+ feat_string = '_semantic_features.npy'
+
+ ready = os.path.join(data_path, 'ready')
+ for input_file in os.listdir(ready):
+ full_path = os.path.join(ready, input_file)
+ if input_file.endswith(sem_string):
+ data_y.append(numpy.load(full_path))
+ elif input_file.endswith(feat_string):
+ data_x.append(numpy.load(full_path))
+ model_training.prepare_training()
+
+ epoch = 1
+ with tqdm(total=((len(data_x) * len(data_y)) / 50) * save_epochs) as pbar1:
+ while epoch <= max_epochs:
+ for i in range(save_epochs):
+ j = 0
+ for x, y in zip(data_x, data_y):
+ model_training.train_step(torch.tensor(x).to('cuda'), torch.tensor(y).to('cuda'), j % 50 == 0) # Print loss every 50 steps
+ j += 1
+ pbar1.update()
+
+ save_p = save_path
+ save_p_2 = f'{base_save_path}_epoch_{epoch}.pth'
+ model_training.save(save_p)
+ model_training.save(save_p_2)
+ print(f'Epoch {epoch} completed')
+ epoch += 1
+ print(f'Done training for {max_epochs} epochs!')
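+
+
+# Hedged usage sketch, not part of the module above: it assumes a checkpoint written by
+# CustomTokenizer.save() (e.g. via auto_train) exists at 'model.pth' and that the input is an
+# (N, input_size) tensor of HuBERT features.
+if __name__ == '__main__':
+    tokenizer = CustomTokenizer.load_from_checkpoint('model.pth', map_location='cpu')
+    features = torch.zeros(10, tokenizer.input_size)  # stand-in for real HuBERT output
+    tokens = tokenizer.get_token(features)
+    print(tokens.shape)  # -> torch.Size([10])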
\ No newline at end of file
diff --git a/bark/hubert/hubert_manager.py b/bark/hubert/hubert_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a6c2fb1a878e5e54d78d9d50826a508fedff88c
--- /dev/null
+++ b/bark/hubert/hubert_manager.py
@@ -0,0 +1,48 @@
+import os.path
+import shutil
+import urllib.request
+
+import huggingface_hub
+
+
+class HuBERTManager:
+
+
+ @staticmethod
+ def make_sure_hubert_installed(download_url: str = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt', file_name: str = 'hubert.pt'):
+ install_dir = os.path.join('models', 'hubert')
+ if not os.path.isdir(install_dir):
+ os.makedirs(install_dir, exist_ok=True)
+ install_file = os.path.join(install_dir, file_name)
+ if not os.path.isfile(install_file):
+ print(f'Downloading HuBERT base model from {download_url}')
+ urllib.request.urlretrieve(download_url, install_file)
+ print('Downloaded HuBERT')
+ return install_file
+
+
+ @staticmethod
+ def make_sure_tokenizer_installed(model: str = 'quantifier_hubert_base_ls960_14.pth', repo: str = 'GitMylo/bark-voice-cloning', tokenizer_lang: str = 'en'):
+ local_file = tokenizer_lang + '_tokenizer.pth'
+ install_dir = os.path.join('models', 'hubert')
+ if not os.path.isdir(install_dir):
+ os.makedirs(install_dir, exist_ok=True)
+ install_file = os.path.join(install_dir, local_file)
+ if not os.path.isfile(install_file):
+ # refactor to use lists
+ if tokenizer_lang == 'en':
+ repo = 'GitMylo/bark-voice-cloning'
+ model = 'quantifier_hubert_base_ls960_14.pth'
+ elif tokenizer_lang == 'de':
+ repo = 'CountFloyd/bark-voice-cloning-german-HuBERT-quantizer'
+ model = 'german-HuBERT-quantizer_14_epoch.pth'
+ elif tokenizer_lang == 'pl':
+ repo = 'Hobis/bark-voice-cloning-polish-HuBERT-quantizer'
+ model = 'polish-HuBERT-quantizer_8_epoch.pth'
+ else:
+                raise ValueError('Unknown Tokenizer Language!')
+ print(f'{local_file} not found. Downloading HuBERT custom tokenizer')
+ huggingface_hub.hf_hub_download(repo, model, local_dir=install_dir, local_dir_use_symlinks=False)
+ shutil.move(os.path.join(install_dir, model), install_file)
+ print('Downloaded tokenizer')
+ return install_file
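+
+
+# Hedged usage sketch: both helpers above are idempotent, download into ./models/hubert on first use
+# and return the local file path; tokenizer_lang must be one of the languages handled above.
+if __name__ == '__main__':
+    hubert_path = HuBERTManager.make_sure_hubert_installed()
+    tokenizer_path = HuBERTManager.make_sure_tokenizer_installed(tokenizer_lang='en')
+    print(hubert_path, tokenizer_path)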
diff --git a/bark/hubert/pre_kmeans_hubert.py b/bark/hubert/pre_kmeans_hubert.py
new file mode 100644
index 0000000000000000000000000000000000000000..5208bd2792dd32e7f761ae787927a70bdcb2e5d6
--- /dev/null
+++ b/bark/hubert/pre_kmeans_hubert.py
@@ -0,0 +1,107 @@
+"""
+Modified HuBERT model without kmeans.
+Original author: https://github.com/lucidrains/
+Modified by: https://www.github.com/gitmylo/
+License: MIT
+"""
+
+# Modified code from https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/hubert_kmeans.py
+
+from pathlib import Path
+
+import torch
+from torch import nn
+from einops import pack, unpack
+
+import fairseq
+
+from torchaudio.functional import resample
+
+from audiolm_pytorch.utils import curtail_to_multiple
+
+import logging
+logging.root.setLevel(logging.ERROR)
+
+
+def exists(val):
+ return val is not None
+
+
+def default(val, d):
+ return val if exists(val) else d
+
+
+class CustomHubert(nn.Module):
+ """
+ checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
+ or you can train your own
+ """
+
+ def __init__(
+ self,
+ checkpoint_path,
+ target_sample_hz=16000,
+ seq_len_multiple_of=None,
+ output_layer=9,
+ device=None
+ ):
+ super().__init__()
+ self.target_sample_hz = target_sample_hz
+ self.seq_len_multiple_of = seq_len_multiple_of
+ self.output_layer = output_layer
+
+ if device is not None:
+ self.to(device)
+
+ model_path = Path(checkpoint_path)
+
+ assert model_path.exists(), f'path {checkpoint_path} does not exist'
+
+ print(f"Loading Hubert {checkpoint_path}")
+ checkpoint = torch.load(checkpoint_path)
+ load_model_input = {checkpoint_path: checkpoint}
+ model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
+
+ if device is not None:
+ model[0].to(device)
+
+ self.model = model[0]
+ self.model.eval()
+
+ @property
+ def groups(self):
+ return 1
+
+ @torch.no_grad()
+ def forward(
+ self,
+ wav_input,
+ flatten=True,
+ input_sample_hz=None
+ ):
+ device = wav_input.device
+
+ if exists(input_sample_hz):
+ wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
+
+ if exists(self.seq_len_multiple_of):
+ wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
+
+ embed = self.model(
+ wav_input,
+ features_only=True,
+ mask=False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
+ output_layer=self.output_layer
+ )
+
+ embed, packed_shape = pack([embed['x']], '* d')
+
+ # codebook_indices = self.kmeans.predict(embed.cpu().detach().numpy())
+
+ codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device) # .long()
+
+ if flatten:
+ return codebook_indices
+
+ codebook_indices, = unpack(codebook_indices, packed_shape, '*')
+ return codebook_indices
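+
+
+# Hedged usage sketch, not part of the upstream module: it assumes the checkpoint downloaded by
+# HuBERTManager exists at ./models/hubert/hubert.pt and that 'sample.wav' is any short audio clip.
+if __name__ == '__main__':
+    import torchaudio
+
+    hubert = CustomHubert(checkpoint_path='./models/hubert/hubert.pt')
+    wav, sr = torchaudio.load('sample.wav')
+    if wav.shape[0] == 2:  # stereo to mono
+        wav = wav.mean(0, keepdim=True)
+    semantic_vectors = hubert.forward(wav, input_sample_hz=sr)
+    print(semantic_vectors.shape)  # (n_frames, 768) feature vectors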
diff --git a/bark/model.py b/bark/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..457b49e749f396c47c6b35f44955fd512d233d79
--- /dev/null
+++ b/bark/model.py
@@ -0,0 +1,218 @@
+"""
+Much of this code is adapted from Andrej Karpathy's NanoGPT
+(https://github.com/karpathy/nanoGPT)
+"""
+import math
+from dataclasses import dataclass
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+class LayerNorm(nn.Module):
+ """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """
+
+ def __init__(self, ndim, bias):
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(ndim))
+ self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
+
+ def forward(self, input):
+ return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
+
+class CausalSelfAttention(nn.Module):
+
+ def __init__(self, config):
+ super().__init__()
+ assert config.n_embd % config.n_head == 0
+ # key, query, value projections for all heads, but in a batch
+ self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
+ # output projection
+ self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
+ # regularization
+ self.attn_dropout = nn.Dropout(config.dropout)
+ self.resid_dropout = nn.Dropout(config.dropout)
+ self.n_head = config.n_head
+ self.n_embd = config.n_embd
+ self.dropout = config.dropout
+ # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary
+ self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
+ if not self.flash:
+ # print("WARNING: using slow attention. Flash Attention atm needs PyTorch nightly and dropout=0.0")
+ # causal mask to ensure that attention is only applied to the left in the input sequence
+ self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
+ .view(1, 1, config.block_size, config.block_size))
+
+ def forward(self, x, past_kv=None, use_cache=False):
+ B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+ q, k ,v = self.c_attn(x).split(self.n_embd, dim=2)
+ k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+ q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+ v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+
+ if past_kv is not None:
+ past_key = past_kv[0]
+ past_value = past_kv[1]
+ k = torch.cat((past_key, k), dim=-2)
+ v = torch.cat((past_value, v), dim=-2)
+
+ FULL_T = k.shape[-2]
+
+ if use_cache is True:
+ present = (k, v)
+ else:
+ present = None
+
+ # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
+ if self.flash:
+ # efficient attention using Flash Attention CUDA kernels
+ if past_kv is not None:
+ # When `past_kv` is provided, we're doing incremental decoding and `q.shape[2] == 1`: q only contains
+ # the query for the last token. scaled_dot_product_attention interprets this as the first token in the
+ # sequence, so if is_causal=True it will mask out all attention from it. This is not what we want, so
+ # to work around this we set is_causal=False.
+ is_causal = False
+ else:
+ is_causal = True
+
+ y = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=self.dropout, is_causal=is_causal)
+ else:
+ # manual implementation of attention
+ att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+ att = att.masked_fill(self.bias[:,:,FULL_T-T:FULL_T,:FULL_T] == 0, float('-inf'))
+ att = F.softmax(att, dim=-1)
+ att = self.attn_dropout(att)
+ y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+ y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+
+ # output projection
+ y = self.resid_dropout(self.c_proj(y))
+ return (y, present)
+
+class MLP(nn.Module):
+
+ def __init__(self, config):
+ super().__init__()
+ self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
+ self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
+ self.dropout = nn.Dropout(config.dropout)
+ self.gelu = nn.GELU()
+
+ def forward(self, x):
+ x = self.c_fc(x)
+ x = self.gelu(x)
+ x = self.c_proj(x)
+ x = self.dropout(x)
+ return x
+
+class Block(nn.Module):
+
+ def __init__(self, config, layer_idx):
+ super().__init__()
+ self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
+ self.attn = CausalSelfAttention(config)
+ self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
+ self.mlp = MLP(config)
+ self.layer_idx = layer_idx
+
+ def forward(self, x, past_kv=None, use_cache=False):
+ attn_output, prev_kvs = self.attn(self.ln_1(x), past_kv=past_kv, use_cache=use_cache)
+ x = x + attn_output
+ x = x + self.mlp(self.ln_2(x))
+ return (x, prev_kvs)
+
+@dataclass
+class GPTConfig:
+ block_size: int = 1024
+ input_vocab_size: int = 10_048
+ output_vocab_size: int = 10_048
+ n_layer: int = 12
+ n_head: int = 12
+ n_embd: int = 768
+ dropout: float = 0.0
+ bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
+
+class GPT(nn.Module):
+
+ def __init__(self, config):
+ super().__init__()
+ assert config.input_vocab_size is not None
+ assert config.output_vocab_size is not None
+ assert config.block_size is not None
+ self.config = config
+
+ self.transformer = nn.ModuleDict(dict(
+ wte = nn.Embedding(config.input_vocab_size, config.n_embd),
+ wpe = nn.Embedding(config.block_size, config.n_embd),
+ drop = nn.Dropout(config.dropout),
+ h = nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]),
+ ln_f = LayerNorm(config.n_embd, bias=config.bias),
+ ))
+ self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False)
+
+ def get_num_params(self, non_embedding=True):
+ """
+ Return the number of parameters in the model.
+ For non-embedding count (default), the position embeddings get subtracted.
+ The token embeddings would too, except due to the parameter sharing these
+ params are actually used as weights in the final layer, so we include them.
+ """
+ n_params = sum(p.numel() for p in self.parameters())
+ if non_embedding:
+ n_params -= self.transformer.wte.weight.numel()
+ n_params -= self.transformer.wpe.weight.numel()
+ return n_params
+
+ def forward(self, idx, merge_context=False, past_kv=None, position_ids=None, use_cache=False):
+ device = idx.device
+ b, t = idx.size()
+ if past_kv is not None:
+ assert t == 1
+ tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
+ else:
+ if merge_context:
+ assert(idx.shape[1] >= 256+256+1)
+ t = idx.shape[1] - 256
+ else:
+ assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
+
+ # forward the GPT model itself
+ if merge_context:
+ tok_emb = torch.cat([
+ self.transformer.wte(idx[:,:256]) + self.transformer.wte(idx[:,256:256+256]),
+ self.transformer.wte(idx[:,256+256:])
+ ], dim=1)
+ else:
+ tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
+
+ if past_kv is None:
+ past_length = 0
+ past_kv = tuple([None] * len(self.transformer.h))
+ else:
+ past_length = past_kv[0][0].size(-2)
+
+ if position_ids is None:
+ position_ids = torch.arange(past_length, t + past_length, dtype=torch.long, device=device)
+ position_ids = position_ids.unsqueeze(0) # shape (1, t)
+ assert position_ids.shape == (1, t)
+
+ pos_emb = self.transformer.wpe(position_ids) # position embeddings of shape (1, t, n_embd)
+
+ x = self.transformer.drop(tok_emb + pos_emb)
+
+ new_kv = () if use_cache else None
+
+ for i, (block, past_layer_kv) in enumerate(zip(self.transformer.h, past_kv)):
+ x, kv = block(x, past_kv=past_layer_kv, use_cache=use_cache)
+
+ if use_cache:
+ new_kv = new_kv + (kv,)
+
+ x = self.transformer.ln_f(x)
+
+ # inference-time mini-optimization: only forward the lm_head on the very last position
+ logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim
+
+ return (logits, new_kv)
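+
+
+# Hedged smoke test, not part of the upstream model code: a deliberately tiny config keeps it
+# CPU-friendly and shows that forward() returns logits for the last position plus an optional KV cache.
+if __name__ == '__main__':
+    cfg = GPTConfig(block_size=64, n_layer=2, n_head=2, n_embd=64,
+                    input_vocab_size=1000, output_vocab_size=1000)
+    gpt = GPT(cfg)
+    idx = torch.randint(0, cfg.input_vocab_size, (1, 16))
+    logits, kv = gpt(idx, use_cache=True)
+    print(logits.shape, len(kv))  # torch.Size([1, 1, 1000]) 2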
diff --git a/bark/model_fine.py b/bark/model_fine.py
new file mode 100644
index 0000000000000000000000000000000000000000..6179a851319692b10df0d69b00910ad36cee8685
--- /dev/null
+++ b/bark/model_fine.py
@@ -0,0 +1,149 @@
+"""
+Much of this code is adapted from Andrej Karpathy's NanoGPT
+(https://github.com/karpathy/nanoGPT)
+"""
+from dataclasses import dataclass
+import math
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+from .model import GPT, GPTConfig, MLP
+
+
+class NonCausalSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ assert config.n_embd % config.n_head == 0
+ # key, query, value projections for all heads, but in a batch
+ self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
+ # output projection
+ self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
+ # regularization
+ self.attn_dropout = nn.Dropout(config.dropout)
+ self.resid_dropout = nn.Dropout(config.dropout)
+ self.n_head = config.n_head
+ self.n_embd = config.n_embd
+ self.dropout = config.dropout
+ # flash attention make GPU go brrrrr but support is only in PyTorch nightly and still a bit scary
+ self.flash = (
+ hasattr(torch.nn.functional, "scaled_dot_product_attention") and self.dropout == 0.0
+ )
+
+ def forward(self, x):
+ B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+ q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
+ k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+ q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+ v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+
+ # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
+ if self.flash:
+ # efficient attention using Flash Attention CUDA kernels
+ y = torch.nn.functional.scaled_dot_product_attention(
+ q, k, v, attn_mask=None, dropout_p=self.dropout, is_causal=False
+ )
+ else:
+ # manual implementation of attention
+ att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+ att = F.softmax(att, dim=-1)
+ att = self.attn_dropout(att)
+ y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+ y = (
+ y.transpose(1, 2).contiguous().view(B, T, C)
+ ) # re-assemble all head outputs side by side
+
+ # output projection
+ y = self.resid_dropout(self.c_proj(y))
+ return y
+
+
+class FineBlock(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.ln_1 = nn.LayerNorm(config.n_embd)
+ self.attn = NonCausalSelfAttention(config)
+ self.ln_2 = nn.LayerNorm(config.n_embd)
+ self.mlp = MLP(config)
+
+ def forward(self, x):
+ x = x + self.attn(self.ln_1(x))
+ x = x + self.mlp(self.ln_2(x))
+ return x
+
+
+class FineGPT(GPT):
+ def __init__(self, config):
+ super().__init__(config)
+ del self.lm_head
+ self.config = config
+ self.n_codes_total = config.n_codes_total
+ self.transformer = nn.ModuleDict(
+ dict(
+ wtes=nn.ModuleList(
+ [
+ nn.Embedding(config.input_vocab_size, config.n_embd)
+ for _ in range(config.n_codes_total)
+ ]
+ ),
+ wpe=nn.Embedding(config.block_size, config.n_embd),
+ drop=nn.Dropout(config.dropout),
+ h=nn.ModuleList([FineBlock(config) for _ in range(config.n_layer)]),
+ ln_f=nn.LayerNorm(config.n_embd),
+ )
+ )
+ self.lm_heads = nn.ModuleList(
+ [
+ nn.Linear(config.n_embd, config.output_vocab_size, bias=False)
+ for _ in range(config.n_codes_given, self.n_codes_total)
+ ]
+ )
+ for i in range(self.n_codes_total - config.n_codes_given):
+ self.transformer.wtes[i + 1].weight = self.lm_heads[i].weight
+
+ def forward(self, pred_idx, idx):
+ device = idx.device
+ b, t, codes = idx.size()
+ assert (
+ t <= self.config.block_size
+ ), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
+ assert pred_idx > 0, "cannot predict 0th codebook"
+ assert codes == self.n_codes_total, (b, t, codes)
+ pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t)
+
+ # forward the GPT model itself
+ tok_embs = [
+ wte(idx[:, :, i]).unsqueeze(-1) for i, wte in enumerate(self.transformer.wtes)
+ ] # token embeddings of shape (b, t, n_embd)
+ tok_emb = torch.cat(tok_embs, dim=-1)
+ pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd)
+ x = tok_emb[:, :, :, : pred_idx + 1].sum(dim=-1)
+ x = self.transformer.drop(x + pos_emb)
+ for block in self.transformer.h:
+ x = block(x)
+ x = self.transformer.ln_f(x)
+ logits = self.lm_heads[pred_idx - self.config.n_codes_given](x)
+ return logits
+
+ def get_num_params(self, non_embedding=True):
+ """
+ Return the number of parameters in the model.
+ For non-embedding count (default), the position embeddings get subtracted.
+ The token embeddings would too, except due to the parameter sharing these
+ params are actually used as weights in the final layer, so we include them.
+ """
+ n_params = sum(p.numel() for p in self.parameters())
+ if non_embedding:
+ for wte in self.transformer.wtes:
+ n_params -= wte.weight.numel()
+ n_params -= self.transformer.wpe.weight.numel()
+ return n_params
+
+
+@dataclass
+class FineGPTConfig(GPTConfig):
+ n_codes_total: int = 8
+ n_codes_given: int = 1
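+
+
+# Hedged smoke test, not part of the upstream code: FineGPT predicts one codebook channel at a time
+# from all n_codes_total input channels; a tiny config keeps the example light.
+if __name__ == '__main__':
+    cfg = FineGPTConfig(block_size=64, n_layer=2, n_head=2, n_embd=64,
+                        input_vocab_size=1056, output_vocab_size=1056)
+    fine_gpt = FineGPT(cfg)
+    codes = torch.randint(0, cfg.input_vocab_size, (1, 32, cfg.n_codes_total))
+    logits = fine_gpt(pred_idx=2, idx=codes)
+    print(logits.shape)  # torch.Size([1, 32, 1056])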
diff --git a/bark/settings.py b/bark/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..81c660f3d2e33b21821583cb34c872c2ca23928b
--- /dev/null
+++ b/bark/settings.py
@@ -0,0 +1,7 @@
+import os
+
+def initenv(args):
+ os.environ['SUNO_USE_SMALL_MODELS'] = str("-smallmodels" in args)
+ os.environ['BARK_FORCE_CPU'] = str("-forcecpu" in args)
+ os.environ['SUNO_ENABLE_MPS'] = str("-enablemps" in args)
+ os.environ['SUNO_OFFLOAD_CPU'] = str("-offloadcpu" in args)
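+
+
+# Hedged usage sketch: initenv() is meant to be called once at startup with the raw process arguments
+# (e.g. `python webui.py -forcecpu -smallmodels`), turning the flags into environment variables.
+if __name__ == '__main__':
+    import sys
+    initenv(sys.argv)
+    print({key: os.environ[key] for key in
+           ('SUNO_USE_SMALL_MODELS', 'BARK_FORCE_CPU', 'SUNO_ENABLE_MPS', 'SUNO_OFFLOAD_CPU')})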
diff --git a/cloning/__init__.py b/cloning/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/cloning/clonevoice.py b/cloning/clonevoice.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb38dc6df49195a5b6b052732fbfc35a7d69c55a
--- /dev/null
+++ b/cloning/clonevoice.py
@@ -0,0 +1,68 @@
+from bark.generation import load_codec_model, generate_text_semantic, grab_best_device
+from encodec.utils import convert_audio
+from bark.hubert.hubert_manager import HuBERTManager
+from bark.hubert.pre_kmeans_hubert import CustomHubert
+from bark.hubert.customtokenizer import CustomTokenizer
+
+import torchaudio
+import torch
+import os
+import gradio
+import numpy as np
+
+
+def clone_voice(audio_filepath, dest_filename, progress=gradio.Progress(track_tqdm=True)):
+ # if len(text) < 1:
+ # raise gradio.Error('No transcription text entered!')
+
+    use_gpu = os.environ.get("BARK_FORCE_CPU", "False") != "True"
+ progress(0, desc="Loading Codec")
+ model = load_codec_model(use_gpu=use_gpu)
+
+ # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
+ hubert_manager = HuBERTManager()
+ hubert_manager.make_sure_hubert_installed()
+ hubert_manager.make_sure_tokenizer_installed()
+
+ # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
+ # Load HuBERT for semantic tokens
+
+ # Load the HuBERT model
+ device = grab_best_device(use_gpu)
+ hubert_model = CustomHubert(checkpoint_path='./models/hubert/hubert.pt').to(device)
+
+ # Load the CustomTokenizer model
+ tokenizer = CustomTokenizer.load_from_checkpoint('./models/hubert/en_tokenizer.pth').to(device) # change to the correct path
+
+ progress(0.25, desc="Converting WAV")
+
+ # Load and pre-process the audio waveform
+ wav, sr = torchaudio.load(audio_filepath)
+ if wav.shape[0] == 2: # Stereo to mono if needed
+ wav = wav.mean(0, keepdim=True)
+
+ wav = convert_audio(wav, sr, model.sample_rate, model.channels)
+ wav = wav.to(device)
+ progress(0.5, desc="Extracting codes")
+
+ semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate)
+ semantic_tokens = tokenizer.get_token(semantic_vectors)
+
+ # Extract discrete codes from EnCodec
+ with torch.no_grad():
+ encoded_frames = model.encode(wav.unsqueeze(0))
+ codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze() # [n_q, T]
+
+ # get seconds of audio
+ # seconds = wav.shape[-1] / model.sample_rate
+ # generate semantic tokens
+ # semantic_tokens = generate_text_semantic(text, max_gen_duration_s=seconds, top_k=50, top_p=.95, temp=0.7)
+
+ # move codes to cpu
+ codes = codes.cpu().numpy()
+ # move semantic tokens to cpu
+ semantic_tokens = semantic_tokens.cpu().numpy()
+
+ output_path = dest_filename + '.npz'
+ np.savez(output_path, fine_prompt=codes, coarse_prompt=codes[:2, :], semantic_prompt=semantic_tokens)
+ return ["Finished", output_path]
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6379e14809df5fbf93f65a1847661e16e1f75c1e
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,8 @@
+input_text_desired_length: 110
+input_text_max_length: 170
+selected_theme: JohnSmith9982/small_and_pretty
+server_name: ''
+server_port: 0
+server_share: false
+silence_between_sentences: 250
+silence_between_speakers: 500
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..cfc229ece9aac6a751857376ed5830d0ebce57bf
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,60 @@
+[build-system]
+requires = ["setuptools"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "bark-ui-enhanced"
+version = "0.7.0"
+description = "Bark text-to-audio model with additional features and a Web UI"
+readme = "README.md"
+requires-python = ">=3.8"
+authors = [
+ {name = "Suno Inc (original Bark)", email = "hello@suno.ai"},
+ {name = "Count Floyd"},
+]
+# MIT License
+license = {file = "LICENSE"}
+
+dependencies = [
+ "boto3",
+ "encodec",
+ "funcy",
+ "huggingface-hub>=0.14.1",
+ "numpy",
+ "scipy",
+ "tokenizers",
+ "torch",
+ "tqdm",
+ "transformers",
+]
+
+[project.urls]
+source = "https://github.com/C0untFloyd/bark-gui"
+
+[project.optional-dependencies]
+dev = [
+ "bandit",
+ "black",
+ "codecov",
+ "flake8",
+ "hypothesis>=6.14,<7",
+ "isort>=5.0.0,<6",
+ "jupyter",
+ "mypy",
+ "nbconvert",
+ "nbformat",
+ "pydocstyle",
+ "pylint",
+ "pytest",
+ "pytest-cov",
+]
+
+[tool.setuptools]
+packages = ["bark"]
+
+[tool.setuptools.package-data]
+bark = ["assets/prompts/*.npz", "assets/prompts/v2/*.npz"]
+
+
+[tool.black]
+line-length = 100
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c585ab7c617ec65d776f618c33a8a7eebc20471e
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,13 @@
+fairseq; platform_system != "Windows"
+fairseq@https://github.com/Sharrnah/fairseq/releases/download/v0.12.4/fairseq-0.12.4-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+audiolm-pytorch
+gradio
+funcy
+linkify
+mutagen
+pytorch_seed
+pyyaml
+sentencepiece
+soundfile; platform_system == "Windows"
+sox; platform_system != "Windows"
+transformers
\ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..606849326a4002007fd42060b51e69a19c18675c
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,3 @@
+from setuptools import setup
+
+setup()
diff --git a/swap_voice.py b/swap_voice.py
new file mode 100644
index 0000000000000000000000000000000000000000..be1135be3648f9757046de1f9a4e240bd818be5a
--- /dev/null
+++ b/swap_voice.py
@@ -0,0 +1,62 @@
+from bark.generation import load_codec_model, generate_text_semantic, grab_best_device
+from bark import SAMPLE_RATE
+from encodec.utils import convert_audio
+from bark.hubert.hubert_manager import HuBERTManager
+from bark.hubert.pre_kmeans_hubert import CustomHubert
+from bark.hubert.customtokenizer import CustomTokenizer
+from bark.api import semantic_to_waveform
+from scipy.io.wavfile import write as write_wav
+from util.helper import create_filename
+from util.settings import Settings
+
+
+import torchaudio
+import torch
+import os
+import gradio
+
+def swap_voice_from_audio(swap_audio_filename, selected_speaker, tokenizer_lang, seed, batchcount, progress=gradio.Progress(track_tqdm=True)):
+    use_gpu = os.environ.get("BARK_FORCE_CPU", "False") != "True"
+ progress(0, desc="Loading Codec")
+
+ # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
+ hubert_manager = HuBERTManager()
+ hubert_manager.make_sure_hubert_installed()
+ hubert_manager.make_sure_tokenizer_installed(tokenizer_lang=tokenizer_lang)
+
+ # From https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer
+ # Load HuBERT for semantic tokens
+
+ # Load the HuBERT model
+ device = grab_best_device(use_gpu)
+ hubert_model = CustomHubert(checkpoint_path='./models/hubert/hubert.pt').to(device)
+ model = load_codec_model(use_gpu=use_gpu)
+
+ # Load the CustomTokenizer model
+ tokenizer = CustomTokenizer.load_from_checkpoint(f'./models/hubert/{tokenizer_lang}_tokenizer.pth').to(device) # Automatically uses the right layers
+
+ progress(0.25, desc="Converting WAV")
+
+ # Load and pre-process the audio waveform
+ wav, sr = torchaudio.load(swap_audio_filename)
+ if wav.shape[0] == 2: # Stereo to mono if needed
+ wav = wav.mean(0, keepdim=True)
+
+ wav = convert_audio(wav, sr, model.sample_rate, model.channels)
+ wav = wav.to(device)
+ semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate)
+ semantic_tokens = tokenizer.get_token(semantic_vectors)
+
+ audio = semantic_to_waveform(
+ semantic_tokens,
+ history_prompt=selected_speaker,
+ temp=0.7,
+ silent=False,
+ output_full=False)
+
+ settings = Settings('config.yaml')
+
+    result = create_filename(settings.output_folder_path, None, "swapvoice", ".wav")
+ write_wav(result, SAMPLE_RATE, audio)
+ return result
+
diff --git a/training/__init__.py b/training/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/training/data.py b/training/data.py
new file mode 100644
index 0000000000000000000000000000000000000000..dedf4c414823d374ed7123cdcef451500ddb6564
--- /dev/null
+++ b/training/data.py
@@ -0,0 +1,52 @@
+import random
+import requests
+import os, glob
+
+# english literature
+books = [
+ 'https://www.gutenberg.org/cache/epub/1513/pg1513.txt',
+ 'https://www.gutenberg.org/files/2701/2701-0.txt',
+ 'https://www.gutenberg.org/cache/epub/84/pg84.txt',
+ 'https://www.gutenberg.org/cache/epub/2641/pg2641.txt',
+ 'https://www.gutenberg.org/cache/epub/1342/pg1342.txt',
+ 'https://www.gutenberg.org/cache/epub/100/pg100.txt'
+ ]
+
+#default english
+# allowed_chars = ' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_+=\"\':;[]{}/<>,.`~\n\\'
+
+#german
+allowed_chars = ' aäbcdefghijklmnoöpqrsßtuüvwxyzABCDEFGHIJKLMNOÖPQRSTUÜVWXYZ0123456789!@#$%^&*()-_+=\"\':;[]{}/<>,.`~\n\\'
+
+
+def download_book(book):
+ return requests.get(book).content.decode('utf-8')
+
+
+def filter_data(data):
+ print('Filtering data')
+ return ''.join([char for char in data if char in allowed_chars])
+
+
+def load_books(fromfolder=False):
+ text_data = []
+ if fromfolder:
+ current_working_directory = os.getcwd()
+ print(current_working_directory)
+ path = 'text'
+ for filename in glob.glob(os.path.join(path, '*.txt')):
+ with open(os.path.join(os.getcwd(), filename), 'r') as f: # open in readonly mode
+ print(f'Loading {filename}')
+ text_data.append(filter_data(str(f.read())))
+ else:
+ print(f'Loading {len(books)} books into ram')
+ for book in books:
+ text_data.append(filter_data(str(download_book(book))))
+ print('Loaded books')
+ return ' '.join(text_data)
+
+
+def random_split_chunk(data, size=14):
+ data = data.split(' ')
+ index = random.randrange(0, len(data))
+ return ' '.join(data[index:index+size])
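+
+
+# Hedged usage sketch: random_split_chunk() samples a short window of words, so it can be tried on any
+# string; load_books(False) would instead download the Gutenberg texts listed above.
+if __name__ == '__main__':
+    sample = 'the quick brown fox jumps over the lazy dog ' * 4
+    print(random_split_chunk(filter_data(sample), size=5))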
diff --git a/training/train.py b/training/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..be0cccc6145b46d026831cb71f198d2292fae931
--- /dev/null
+++ b/training/train.py
@@ -0,0 +1,47 @@
+import os
+import fnmatch
+import shutil
+
+import numpy
+import torchaudio
+import gradio
+
+from bark.hubert.pre_kmeans_hubert import CustomHubert
+from bark.hubert.customtokenizer import auto_train
+from tqdm.auto import tqdm
+
+
+def training_prepare_files(path, model, progress=gradio.Progress(track_tqdm=True)):
+
+ semanticsfolder = "./training/data/output"
+ wavfolder = "./training/data/output_wav"
+ ready = os.path.join(path, 'ready')
+
+ testfiles = fnmatch.filter(os.listdir(ready), '*.npy')
+    if len(testfiles) < 1:
+ # prepare and copy for training
+ hubert_model = CustomHubert(checkpoint_path=model)
+
+ wavfiles = fnmatch.filter(os.listdir(wavfolder), '*.wav')
+ for i, f in tqdm(enumerate(wavfiles), total=len(wavfiles)):
+ semaname = '.'.join(f.split('.')[:-1]) # Cut off the extension
+ semaname = f'{semaname}.npy'
+ semafilename = os.path.join(semanticsfolder, semaname)
+ if not os.path.isfile(semafilename):
+ print(f'Skipping {f} no semantics pair found!')
+ continue
+
+ print('Processing', f)
+ wav, sr = torchaudio.load(os.path.join(wavfolder, f))
+ if wav.shape[0] == 2: # Stereo to mono if needed
+ wav = wav.mean(0, keepdim=True)
+ output = hubert_model.forward(wav, input_sample_hz=sr)
+ out_array = output.cpu().numpy()
+ fname = f'{i}_semantic_features.npy'
+ numpy.save(os.path.join(ready, fname), out_array)
+ fname = f'{i}_semantic.npy'
+ shutil.copy(semafilename, os.path.join(ready, fname))
+
+def train(path, save_every, max_epochs):
+    auto_train(path, save_epochs=save_every, max_epochs=max_epochs)
+
diff --git a/training/training_prepare.py b/training/training_prepare.py
new file mode 100644
index 0000000000000000000000000000000000000000..da4b30622d096fe636a0db358c43336eeef4d959
--- /dev/null
+++ b/training/training_prepare.py
@@ -0,0 +1,73 @@
+import random
+import uuid
+import numpy
+import os
+import fnmatch
+
+from tqdm.auto import tqdm
+from scipy.io import wavfile
+
+from bark.generation import load_model, SAMPLE_RATE
+from bark.api import semantic_to_waveform
+
+from bark import text_to_semantic
+
+from training.data import load_books, random_split_chunk
+
+output = 'training/data/output'
+output_wav = 'training/data/output_wav'
+
+
+def prepare_semantics_from_text(num_generations):
+ loaded_data = load_books(True)
+
+ print('Loading semantics model')
+ load_model(use_gpu=True, use_small=False, force_reload=False, model_type='text')
+
+ if not os.path.isdir(output):
+ os.mkdir(output)
+
+ loop = 1
+    while loop <= num_generations:
+ filename = uuid.uuid4().hex + '.npy'
+ file_name = os.path.join(output, filename)
+ text = ''
+ while not len(text) > 0:
+ text = random_split_chunk(loaded_data) # Obtain a short chunk of text
+ text = text.strip()
+ print(f'{loop} Generating semantics for text:', text)
+ loop+=1
+ semantics = text_to_semantic(text, temp=round(random.uniform(0.6, 0.8), ndigits=2))
+ numpy.save(file_name, semantics)
+
+
+def prepare_wavs_from_semantics():
+ if not os.path.isdir(output):
+ raise Exception('No \'output\' folder, make sure you run create_data.py first!')
+ if not os.path.isdir(output_wav):
+ os.mkdir(output_wav)
+
+ print('Loading coarse model')
+ load_model(use_gpu=True, use_small=False, force_reload=False, model_type='coarse')
+ print('Loading fine model')
+ load_model(use_gpu=True, use_small=False, force_reload=False, model_type='fine')
+
+ files = fnmatch.filter(os.listdir(output), '*.npy')
+ total = len(files)
+
+ for i, f in tqdm(enumerate(files), total=len(files)):
+ real_name = '.'.join(f.split('.')[:-1]) # Cut off the extension
+ file_name = os.path.join(output, f)
+ out_file = os.path.join(output_wav, f'{real_name}.wav')
+ if not os.path.isfile(out_file) and os.path.isfile(file_name): # Don't process files that have already been processed, to be able to continue previous generations
+ print(f'Processing ({i+1}/{total}) -> {f}')
+ wav = semantic_to_waveform(numpy.load(file_name), temp=round(random.uniform(0.6, 0.8), ndigits=2))
+ # Change to PCM16
+ # wav = (wav * 32767).astype(np.int16)
+ wavfile.write(out_file, SAMPLE_RATE, wav)
+
+ print('Done!')
+
diff --git a/util/__init__.py b/util/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/util/helper.py b/util/helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..185613661a2f450e55a5d2add1a1e75bc08f5c19
--- /dev/null
+++ b/util/helper.py
@@ -0,0 +1,35 @@
+import os
+from datetime import datetime
+from mutagen.wave import WAVE
+from mutagen.id3 import TIT2, TPE1, TPUB, COMM
+
+def create_filename(path, seed, name, extension):
+ now = datetime.now()
+    date_str = now.strftime("%m-%d-%Y")
+ outputs_folder = os.path.join(os.getcwd(), path)
+ if not os.path.exists(outputs_folder):
+ os.makedirs(outputs_folder)
+
+ sub_folder = os.path.join(outputs_folder, date_str)
+ if not os.path.exists(sub_folder):
+ os.makedirs(sub_folder)
+
+ time_str = now.strftime("%H-%M-%S")
+    if seed is None:
+ file_name = f"{name}_{time_str}{extension}"
+ else:
+ file_name = f"{name}_{time_str}_s{seed}{extension}"
+ return os.path.join(sub_folder, file_name)
+
+
+def add_id3_tag(filename, text, speakername, seed):
+ audio = WAVE(filename)
+    if speakername is None:
+ speakername = "Unconditional"
+
+ # write id3 tag with text truncated to 60 chars, as a precaution...
+ audio["TIT2"] = TIT2(encoding=3, text=text[:60])
+ audio["TPE1"] = TPE1(encoding=3, text=f"Voice {speakername} using Seed={seed}")
+ audio["TPUB"] = TPUB(encoding=3, text="Bark by Suno AI")
+ audio["COMMENT"] = COMM(encoding=3, text="Generated with Bark GUI - Text-Prompted Generative Audio Model. Visit https://github.com/C0untFloyd/bark-gui")
+ audio.save()
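+
+
+# Hedged usage sketch: create_filename() builds <path>/<date>/<name>_<time>[_s<seed>]<extension> and
+# creates the folders as a side effect; add_id3_tag() needs an existing WAV file, so it is not shown.
+if __name__ == '__main__':
+    print(create_filename('outputs', None, 'demo', '.wav'))
+    print(create_filename('outputs', 1234, 'demo', '.wav'))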
diff --git a/util/parseinput.py b/util/parseinput.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2102648cf169f0a52bb66755308fee5f81247e0
--- /dev/null
+++ b/util/parseinput.py
@@ -0,0 +1,129 @@
+import re
+import xml.etree.ElementTree as ET
+from xml.sax import saxutils
+#import nltk
+
+# Chunked generation originally from https://github.com/serp-ai/bark-with-voice-clone
+def split_and_recombine_text(text, desired_length=100, max_length=150):
+ # return nltk.sent_tokenize(text)
+
+ # from https://github.com/neonbjb/tortoise-tts
+    """Split text into chunks of a desired length, trying to keep sentences intact."""
+ # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii
+ text = re.sub(r"\n\n+", "\n", text)
+ text = re.sub(r"\s+", " ", text)
+ text = re.sub(r"[“”]", '"', text)
+
+ rv = []
+ in_quote = False
+ current = ""
+ split_pos = []
+ pos = -1
+ end_pos = len(text) - 1
+
+ def seek(delta):
+ nonlocal pos, in_quote, current
+ is_neg = delta < 0
+ for _ in range(abs(delta)):
+ if is_neg:
+ pos -= 1
+ current = current[:-1]
+ else:
+ pos += 1
+ current += text[pos]
+ if text[pos] == '"':
+ in_quote = not in_quote
+ return text[pos]
+
+ def peek(delta):
+ p = pos + delta
+ return text[p] if p < end_pos and p >= 0 else ""
+
+ def commit():
+ nonlocal rv, current, split_pos
+ rv.append(current)
+ current = ""
+ split_pos = []
+
+ while pos < end_pos:
+ c = seek(1)
+ # do we need to force a split?
+ if len(current) >= max_length:
+ if len(split_pos) > 0 and len(current) > (desired_length / 2):
+ # we have at least one sentence and we are over half the desired length, seek back to the last split
+ d = pos - split_pos[-1]
+ seek(-d)
+ else:
+ # no full sentences, seek back until we are not in the middle of a word and split there
+ while c not in "!?.,\n " and pos > 0 and len(current) > desired_length:
+ c = seek(-1)
+ commit()
+ # check for sentence boundaries
+ elif not in_quote and (c in "!?]\n" or (c == "." and peek(1) in "\n ")):
+ # seek forward if we have consecutive boundary markers but still within the max length
+ while (
+ pos < len(text) - 1 and len(current) < max_length and peek(1) in "!?.]"
+ ):
+ c = seek(1)
+ split_pos.append(pos)
+ if len(current) >= desired_length:
+ commit()
+        # treat end of quote as a boundary if it's followed by a space or newline
+ elif in_quote and peek(1) == '"' and peek(2) in "\n ":
+ seek(2)
+ split_pos.append(pos)
+ rv.append(current)
+
+ # clean up, remove lines with only whitespace or punctuation
+ rv = [s.strip() for s in rv]
+ rv = [s for s in rv if len(s) > 0 and not re.match(r"^[\s\.,;:!?]*$", s)]
+
+ return rv
+
+def is_ssml(value):
+ try:
+ ET.fromstring(value)
+ except ET.ParseError:
+ return False
+ return True
+
+def build_ssml(rawtext, selected_voice):
+    texts = rawtext.split("\n")
+    joinedparts = ""
+    for textpart in texts:
+        textpart = textpart.strip()
+        if len(textpart) < 1:
+            continue
+        joinedparts = joinedparts + f"\n<voice name=\"{selected_voice}\">{saxutils.escape(textpart)}</voice>"
+    ssml = f"""<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="en-US">
+{joinedparts}
+</speak>
+"""
+    return ssml
+
+def create_clips_from_ssml(ssmlinput):
+ # Parse the XML
+ tree = ET.ElementTree(ET.fromstring(ssmlinput))
+ root = tree.getroot()
+
+ # Create an empty list
+ voice_list = []
+
+ # Loop through all voice tags
+ for voice in root.iter('{http://www.w3.org/2001/10/synthesis}voice'):
+ # Extract the voice name attribute and the content text
+ voice_name = voice.attrib['name']
+ voice_content = voice.text.strip() if voice.text else ''
+        if len(voice_content) > 0:
+            parts = split_and_recombine_text(voice_content)
+            for p in parts:
+                if len(p) > 1:
+ # add to tuple list
+ voice_list.append((voice_name, p))
+ return voice_list
+
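+
+# Hedged usage sketch: round-trips plain text through the chunker and the SSML helpers above; the
+# speaker name is illustrative.
+if __name__ == '__main__':
+    text = 'Hello there. General Kenobi! This is a longer sentence that should end up in its own chunk.'
+    print(split_and_recombine_text(text, desired_length=40, max_length=60))
+    ssml = build_ssml('First line\nSecond line', 'v2/en_speaker_0')
+    print(is_ssml(ssml), create_clips_from_ssml(ssml))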
diff --git a/util/settings.py b/util/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ab66b0c7605d2b877defdd8592097a8a4c6f21a
--- /dev/null
+++ b/util/settings.py
@@ -0,0 +1,41 @@
+import yaml
+
+class Settings:
+ def __init__(self, config_file):
+ self.config_file = config_file
+ self.load()
+
+    def load(self):
+        data = {}
+        try:
+            with open(self.config_file, 'r') as f:
+                data = yaml.load(f, Loader=yaml.FullLoader) or {}
+        except (OSError, yaml.YAMLError):
+            pass
+        self.selected_theme = data.get('selected_theme', "gstaff/xkcd")
+        self.server_name = data.get('server_name', "")
+        self.server_port = data.get('server_port', 0)
+        self.server_share = data.get('server_share', False)
+        self.input_text_desired_length = data.get('input_text_desired_length', 110)
+        self.input_text_max_length = data.get('input_text_max_length', 170)
+        self.silence_sentence = data.get('silence_between_sentences', 250)
+        self.silence_speakers = data.get('silence_between_speakers', 500)
+        self.output_folder_path = data.get('output_folder_path', 'outputs')
+
+ def save(self):
+ data = {
+ 'selected_theme': self.selected_theme,
+ 'server_name': self.server_name,
+ 'server_port': self.server_port,
+ 'server_share': self.server_share,
+ 'input_text_desired_length' : self.input_text_desired_length,
+ 'input_text_max_length' : self.input_text_max_length,
+ 'silence_between_sentences': self.silence_sentence,
+ 'silence_between_speakers': self.silence_speakers,
+ 'output_folder_path': self.output_folder_path
+ }
+ with open(self.config_file, 'w') as f:
+ yaml.dump(data, f)
+
+
+
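+# Hedged usage sketch: load() falls back to defaults when config.yaml is missing or unreadable, and
+# save() writes the current values back out.
+if __name__ == '__main__':
+    settings = Settings('config.yaml')
+    print(settings.selected_theme, settings.server_port, settings.output_folder_path)
+    settings.save()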