diff --git "a/tts.ipynb" "b/tts.ipynb"
new file mode 100644
--- /dev/null
+++ "b/tts.ipynb"
@@ -0,0 +1,703 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Text to Speech Playground"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/opt/homebrew/Caskroom/miniconda/base/envs/llm/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      "  from .autonotebook import tqdm as notebook_tqdm\n"
+     ]
+    }
+   ],
+   "source": [
+    "import os\n",
+    "\n",
+    "import torch\n",
+    "import gradio as gr\n",
+    "from TTS.api import TTS\n",
+    "\n",
+    "os.environ[\"COQUI_TOS_AGREED\"] = \"1\"\n",
+    "# os.environ[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from collections import namedtuple\n",
+    "\n",
+    "Voice = namedtuple('Voice', ['name', 'neutral', 'angry'])\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 84,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "voices = [\n",
+    "    Voice('Attenborough', neutral='audio/attenborough/neutral.wav', angry=None),\n",
+    "    Voice('Rick', neutral='audio/rick/neutral.wav', angry=None),\n",
+    "    Voice('Freeman', neutral='audio/freeman/neutral.wav', angry='audio/freeman/angry.wav'),\n",
+    "    Voice('Walken', neutral='audio/walken/neutral.wav', angry=None),\n",
+    "    Voice('Darth Vader', neutral='audio/darth/neutral.wav', angry=None),\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[Voice(name='Attenborough', neutral='audio/attenborough/neutral.wav', angry=None),\n",
+       " Voice(name='Rick', neutral='audio/rick/neutral.wav', angry=None),\n",
+       " Voice(name='Freeman', neutral='audio/freeman/neutral.wav', angry='audio/freeman/angry.wav'),\n",
+       " Voice(name='Walken', neutral='audio/walken/neutral.wav', angry=None),\n",
+       " Voice(name='Darth Vader', neutral='audio/darth/neutral.wav', angry=None)]"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "voices"
+   ]
+  },
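+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "**Added check:** the cells below assume the reference clips above exist on disk. This small sketch flags any missing file before the model is loaded."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# flag any reference clip that is missing on disk\n",
+    "for v in voices:\n",
+    "    for label, path in (('neutral', v.neutral), ('angry', v.angry)):\n",
+    "        if path and not os.path.exists(path):\n",
+    "            print(f'missing {label} clip for {v.name}: {path}')"
+   ]
+  },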
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      " > tts_models/multilingual/multi-dataset/xtts_v2 is already downloaded.\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/opt/homebrew/Caskroom/miniconda/base/envs/llm/lib/python3.11/site-packages/transformers/utils/generic.py:441: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
+      "  _torch_pytree._register_pytree_node(\n",
+      "/opt/homebrew/Caskroom/miniconda/base/envs/llm/lib/python3.11/site-packages/transformers/utils/generic.py:309: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
+      "  _torch_pytree._register_pytree_node(\n",
+      "/opt/homebrew/Caskroom/miniconda/base/envs/llm/lib/python3.11/site-packages/transformers/utils/generic.py:309: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
+      "  _torch_pytree._register_pytree_node(\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      " > Using model: xtts\n"
+     ]
+    }
+   ],
+   "source": [
+    "# load the text-to-speech model\n",
+    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
+    "# device = \"mps\"\n",
+    "tts_pipeline = TTS(\"tts_models/multilingual/multi-dataset/xtts_v2\").to(device)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import IPython\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 81,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "speaker_embedding_cache = {}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 82,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def compute_speaker_embedding(voice_path: str, config, pipeline, cache):\n",
+    "    # compute the (gpt_cond_latent, speaker_embedding) pair once per reference\n",
+    "    # clip and memoize it, since conditioning is the slow part of voice cloning\n",
+    "    if voice_path not in cache:\n",
+    "        cache[voice_path] = pipeline.synthesizer.tts_model.get_conditioning_latents(\n",
+    "            audio_path=voice_path,\n",
+    "            gpt_cond_len=config.gpt_cond_len,\n",
+    "            gpt_cond_chunk_len=config.gpt_cond_chunk_len,\n",
+    "            max_ref_length=config.max_ref_len,\n",
+    "            sound_norm_refs=config.sound_norm_refs,\n",
+    "        )\n",
+    "    return cache[voice_path]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 87,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "out = compute_speaker_embedding(voices[0].neutral, tts_pipeline.synthesizer.tts_config, tts_pipeline, speaker_embedding_cache)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      " > Text splitted to sentences.\n",
+      "['Hey Petra, so you are hungry?', 'and you like me to prepare some strawberries for you?', 'do you like strawberries?']\n",
+      " > Processing time: 15.77448582649231\n",
+      " > Real-time factor: 1.7459813091024587\n"
+     ]
+    }
+   ],
+   "source": [
+    "out = tts_pipeline.tts(\n",
+    "    \"Hello, I am Rick, pickle rick, you took a wrong turn and now you're stuck in a parallel universe\",\n",
+    "    speaker_wav=\"audio/freeman/neutral.wav\",\n",
+    "    language=\"en\",\n",
+    "    # file_path=\"out.wav\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import List\n",
+    "import time"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ref_audio_path = \"audio/freeman/neutral.wav\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 53,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# raise the cap on reference-audio length used for conditioning\n",
+    "config = tts_pipeline.synthesizer.tts_config\n",
+    "config.max_ref_len = 360"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 78,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "config = tts_pipeline.synthesizer.tts_config\n",
+    "(gpt_cond_latent, speaker_embedding) = tts_pipeline.synthesizer.tts_model.get_conditioning_latents(\n",
+    "    audio_path=ref_audio_path,\n",
+    "    gpt_cond_len=config.gpt_cond_len,\n",
+    "    gpt_cond_chunk_len=config.gpt_cond_chunk_len,\n",
+    "    max_ref_length=config.max_ref_len,\n",
+    "    sound_norm_refs=config.sound_norm_refs,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 107,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "(gpt_cond_latent, speaker_embedding) = compute_speaker_embedding(voices[0].neutral, tts_pipeline.synthesizer.tts_config, tts_pipeline, speaker_embedding_cache)"
+   ]
+  },
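+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "**Added:** a quick look at what the conditioning call returns: a GPT conditioning latent and a speaker embedding, both `torch` tensors (exact shapes depend on the checkpoint)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# inspect the cached conditioning tensors; exact shapes depend on the checkpoint\n",
+    "print(type(gpt_cond_latent), tuple(gpt_cond_latent.shape))\n",
+    "print(type(speaker_embedding), tuple(speaker_embedding.shape))"
+   ]
+  },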
+ "source": [ + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": 116, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(205872,)" + ] + }, + "execution_count": 116, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.array(out)" + ] + }, + { + "cell_type": "code", + "execution_count": 110, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "205872" + ] + }, + "execution_count": 110, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(out)" + ] + }, + { + "cell_type": "code", + "execution_count": 128, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " > Text splitted to sentences.\n", + "['Something is up!']\n", + " > Processing time: 2.9515581130981445\n", + " > Real-time factor: 1.588292083019672\n" + ] + } + ], + "source": [ + "out = tts(\n", + " tts_pipelins.synthesizer,\n", + " \"Something is up!\",\n", + " # speaker_wav=ref_audio_path,\n", + " language_name=\"en\",\n", + " speaker=None,\n", + " gpt_cond_latent=gpt_cond_latent,\n", + " speaker_embedding=speaker_embedding,\n", + " speed=1.1,\n", + " # file_path=\"out.wav\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 129, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 129, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "IPython.display.Audio(out, rate=22050)" + ] + }, + { + "cell_type": "code", + "execution_count": 66, + "metadata": {}, + "outputs": [], + "source": [ + "from TTS.vocoder.utils.generic_utils import interpolate_vocoder_input\n", + "\n", + "def tts(\n", + " self,\n", + " text: str = \"\",\n", + " language_name: str = \"\",\n", + " reference_wav=None,\n", + " gpt_cond_latent=None,\n", + " speaker_embedding=None,\n", + " split_sentences: bool = True,\n", + " **kwargs,\n", + ") -> List[int]:\n", + " \"\"\"🐸 TTS magic. Run all the models and generate speech.\n", + "\n", + " Args:\n", + " text (str): input text.\n", + " speaker_name (str, optional): speaker id for multi-speaker models. Defaults to \"\".\n", + " language_name (str, optional): language id for multi-language models. Defaults to \"\".\n", + " speaker_wav (Union[str, List[str]], optional): path to the speaker wav for voice cloning. Defaults to None.\n", + " style_wav ([type], optional): style waveform for GST. Defaults to None.\n", + " style_text ([type], optional): transcription of style_wav for Capacitron. Defaults to None.\n", + " reference_wav ([type], optional): reference waveform for voice conversion. Defaults to None.\n", + " reference_speaker_name ([type], optional): speaker id of reference waveform. Defaults to None.\n", + " split_sentences (bool, optional): split the input text into sentences. 
+  {
+   "cell_type": "code",
+   "execution_count": 66,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from TTS.utils.synthesis import trim_silence\n",
+    "\n",
+    "def tts(\n",
+    "    self,\n",
+    "    text: str = \"\",\n",
+    "    language_name: str = \"\",\n",
+    "    reference_wav=None,\n",
+    "    gpt_cond_latent=None,\n",
+    "    speaker_embedding=None,\n",
+    "    split_sentences: bool = True,\n",
+    "    **kwargs,\n",
+    ") -> List[int]:\n",
+    "    \"\"\"🐸 TTS magic. Run the TTS model and generate speech.\n",
+    "\n",
+    "    Trimmed-down copy of `Synthesizer.tts` that reuses precomputed\n",
+    "    conditioning latents instead of re-encoding the reference clip.\n",
+    "\n",
+    "    Args:\n",
+    "        text (str): input text.\n",
+    "        language_name (str, optional): language id for multi-language models. Defaults to \"\".\n",
+    "        reference_wav ([type], optional): reference waveform for voice conversion. Defaults to None.\n",
+    "        gpt_cond_latent: GPT conditioning latents from `get_conditioning_latents`.\n",
+    "        speaker_embedding: speaker embedding from `get_conditioning_latents`.\n",
+    "        split_sentences (bool, optional): split the input text into sentences. Defaults to True.\n",
+    "        **kwargs: additional arguments to pass to the TTS model.\n",
+    "\n",
+    "    Returns:\n",
+    "        List[int]: the synthesized waveform as a flat list of samples.\n",
+    "    \"\"\"\n",
+    "    start_time = time.time()\n",
+    "    use_gl = False  # XTTS decodes waveforms directly; no Griffin-Lim vocoder\n",
+    "    wavs = []\n",
+    "\n",
+    "    if not text and not reference_wav:\n",
+    "        raise ValueError(\n",
+    "            \"You need to define either `text` (for synthesis) or a `reference_wav` (for voice conversion) to use the Coqui TTS API.\"\n",
+    "        )\n",
+    "\n",
+    "    if text:\n",
+    "        sens = [text]\n",
+    "        if split_sentences:\n",
+    "            print(\" > Text splitted to sentences.\")\n",
+    "            sens = self.split_into_sentences(text)\n",
+    "        print(sens)\n",
+    "\n",
+    "    if not reference_wav:  # not voice conversion\n",
+    "        for sen in sens:\n",
+    "            outputs = self.tts_model.inference(\n",
+    "                sen,\n",
+    "                language_name,\n",
+    "                gpt_cond_latent,\n",
+    "                speaker_embedding,\n",
+    "                # GPT inference\n",
+    "                temperature=0.75,\n",
+    "                length_penalty=1.0,\n",
+    "                repetition_penalty=10.0,\n",
+    "                top_k=50,\n",
+    "                top_p=0.85,\n",
+    "                do_sample=True,\n",
+    "                **kwargs,\n",
+    "            )\n",
+    "            waveform = outputs[\"wav\"]\n",
+    "            if torch.is_tensor(waveform) and waveform.device != torch.device(\"cpu\") and not use_gl:\n",
+    "                waveform = waveform.cpu()\n",
+    "            if not use_gl and torch.is_tensor(waveform):\n",
+    "                waveform = waveform.numpy()\n",
+    "            waveform = waveform.squeeze()\n",
+    "\n",
+    "            # trim silence\n",
+    "            if \"do_trim_silence\" in self.tts_config.audio and self.tts_config.audio[\"do_trim_silence\"]:\n",
+    "                waveform = trim_silence(waveform, self.tts_model.ap)\n",
+    "\n",
+    "            wavs += list(waveform)\n",
+    "            wavs += [0] * 10000  # short silence between sentences\n",
+    "\n",
+    "    # compute stats\n",
+    "    process_time = time.time() - start_time\n",
+    "    audio_time = len(wavs) / self.tts_config.audio[\"sample_rate\"]\n",
+    "    print(f\" > Processing time: {process_time}\")\n",
+    "    print(f\" > Real-time factor: {process_time / audio_time}\")\n",
+    "    return wavs"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "type(tts_pipeline)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "IPython.display.Audio(out, rate=22050)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def text_to_speech(voice, tts):\n",
+    "    # placeholder: returns the reference clip path instead of synthesizing\n",
+    "    return voice.neutral"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# reference snippet from an earlier prototype; `quest_processing`, `voice`,\n",
+    "# and `state` are not defined in this notebook, and the function name below\n",
+    "# is a placeholder added so the fragment is syntactically valid\n",
+    "def answer_to_audio(quest_processing, voice, state, tts):\n",
+    "    tts.tts_to_file(\n",
+    "        text=str(quest_processing[0]),\n",
+    "        file_path=\"output.wav\",\n",
+    "        speaker_wav=f'Audio_Files/{voice}.wav',\n",
+    "        language=quest_processing[3],\n",
+    "        emotion=\"angry\",\n",
+    "    )\n",
+    "\n",
+    "    audio_path = \"output.wav\"\n",
+    "    return audio_path, state['context'], state"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 90,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "voice_options = []\n",
+    "for voice in voices:\n",
+    "    if voice.neutral:\n",
+    "        voice_options.append(f\"{voice.name} - Neutral\")\n",
+    "    if voice.angry:\n",
+    "        voice_options.append(f\"{voice.name} - Angry\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 101,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def voice_from_text(voice):\n",
+    "    for v in voices:\n",
+    "        if voice == f\"{v.name} - Neutral\":\n",
+    "            return v.neutral\n",
+    "        if voice == f\"{v.name} - Angry\":\n",
+    "            return v.angry"
+   ]
+  },
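+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "**Added check:** every radio label offered in the UI should round-trip through `voice_from_text` to a clip path; unknown labels fall through and return `None`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# every label offered in the UI should resolve to an existing clip\n",
+    "for label in voice_options:\n",
+    "    path = voice_from_text(label)\n",
+    "    print(label, '->', path, '| exists:', bool(path) and os.path.exists(path))"
+   ]
+  },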
+ "outputs": [], + "source": [ + "def tts_gradio(text, voice, state):\n", + " print(text, voice, state)\n", + " voice_path = voice_from_text(voice)\n", + " (gpt_cond_latent, speaker_embedding) = compute_speaker_embedding(voice_path, tts_pipelins.synthesizer.tts_config, tts_pipelins, speaker_embedding_cache)\n", + " out = tts(\n", + " tts_pipelins.synthesizer,\n", + " text,\n", + " language_name=\"en\",\n", + " speaker=None,\n", + " gpt_cond_latent=gpt_cond_latent,\n", + " speaker_embedding=speaker_embedding,\n", + " speed=1.1,\n", + " # file_path=\"out.wav\",\n", + " )\n", + " return (22050, np.array(out)), dict(text=text, voice=voice)" + ] + }, + { + "cell_type": "code", + "execution_count": 122, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "dict_keys(['audio/attenborough/neutral.wav'])" + ] + }, + "execution_count": 122, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "speaker_embedding_cache.keys()" + ] + }, + { + "cell_type": "code", + "execution_count": 127, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "This is going to be fun, let's enjoy ourselves\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Closing server running on port: 7860\n", + "Running on local URL: http://0.0.0.0:7860\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "This is going to be fun, let's enjoy ourselves Darth Wader - Neutral None\n", + " > Text splitted to sentences.\n", + "[\"This is going to be fun, let's enjoy ourselves\"]\n", + " > Processing time: 9.152068138122559\n", + " > Real-time factor: 1.8119083325456329\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/homebrew/Caskroom/miniconda/base/envs/llm/lib/python3.11/site-packages/gradio/processing_utils.py:390: UserWarning: Trying to convert audio automatically from float64 to 16-bit int format.\n", + " warnings.warn(warning.format(data.dtype))\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "This is going to be fun, let's enjoy ourselves Darth Wader - Neutral {'text': \"This is going to be fun, let's enjoy ourselves\", 'voice': 'Darth Wader - Neutral'}\n", + " > Text splitted to sentences.\n", + "[\"This is going to be fun, let's enjoy ourselves\"]\n", + " > Processing time: 7.824646234512329\n", + " > Real-time factor: 1.8261372721316347\n", + "Keyboard interruption in main thread... closing server.\n" + ] + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 127, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#INTERFACE WITH AUDIO TO AUDIO\n", + "\n", + "#to be able to use the microphone on chrome, you will have to go to chrome://flags/#unsafely-treat-insecure-origin-as-secure and enter http://10.186.115.21:7860/ \n", + "#in \"Insecure origins treated as secure\", enable it and relaunch chrome\n", + "\n", + "\n", + "model_answer= ''\n", + "general_context= \"This is going to be fun, let's enjoy ourselves\"\n", + "# Define the initial state with some initial context.\n", + "print(general_context)\n", + "initial_state = {'context': general_context}\n", + "initial_context= initial_state['context']\n", + "# Create the Gradio interface.\n", + "iface = gr.Interface(\n", + " fn=tts_gradio,\n", + " inputs=[\n", + " gr.Textbox(value=initial_context, visible=True, label='Enter the text to be converted to speech', placeholder=\"This is going to be fun, let's enjoy ourselves\", lines=5),\n", + " gr.Radio(choices=voice_options, label='Choose a voice', value=voice_options[0], show_label=True), # Radio button for voice selection\n", + " gr.State() # This will keep track of the context state across interactions.\n", + " ],\n", + " outputs=[\n", + " gr.Audio(label = 'output audio', autoplay=True),\n", + " gr.State()\n", + " ],\n", + " flagging_options=['👎', '👍'],\n", + ")\n", + "#close all interfaces open to make the port available\n", + "gr.close_all()\n", + "# Launch the interface.\n", + "iface.launch(debug=True, share=False, server_name=\"0.0.0.0\", server_port=7860, ssl_verify=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "base", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}