{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "cb5d0890-3f2d-4020-8270-f3a9bb9f63c6", "metadata": {}, "outputs": [], "source": [ "%%bash\n", "# Install VALL-E and required libraries.\n", "# NOTE: nothing may follow %%bash on the magic line itself -- IPython passes\n", "# any trailing text as arguments to bash, which breaks the cell.\n", "# PyTorch\n", "pip install torch==1.13.1 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116\n", "pip install torchmetrics==0.11.1\n", "# fbank\n", "pip install librosa==0.8.1\n", "\n", "# phonemizer pypinyin (-y: this cell runs non-interactively, no prompt possible)\n", "apt-get install -y espeak-ng\n", "## OSX: brew install espeak\n", "pip install phonemizer==3.2.1 pypinyin==0.48.0\n", "\n", "# lhotse update to newest version\n", "# https://github.com/lhotse-speech/lhotse/pull/956\n", "# https://github.com/lhotse-speech/lhotse/pull/960\n", "pip uninstall -y lhotse\n", "pip install lhotse\n", "\n", "# k2\n", "# find the right version in https://huggingface.co/csukuangfj/k2\n", "pip install https://huggingface.co/csukuangfj/k2/resolve/main/cuda/k2-1.23.4.dev20230224+cuda11.6.torch1.13.1-cp310-cp310-linux_x86_64.whl\n", "\n", "# icefall\n", "git clone https://github.com/k2-fsa/icefall\n", "cd icefall\n", "pip install -r requirements.txt\n", "export PYTHONPATH=`pwd`/../icefall:$PYTHONPATH\n", "echo \"export PYTHONPATH=`pwd`/../icefall:\$PYTHONPATH\" >> ~/.zshrc\n", "echo \"export PYTHONPATH=`pwd`/../icefall:\$PYTHONPATH\" >> ~/.bashrc\n", "cd -\n", "source ~/.zshrc\n", "\n", "# valle\n", "git clone https://github.com/lifeiteng/valle.git\n", "cd valle\n", "pip install -e ."
] }, { "cell_type": "code", "execution_count": 1, "id": "1b8a4af2-5851-4c41-96bb-bda4b259f857", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/dongsun/.local/lib/python3.10/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: '/home/dongsun/.local/lib/python3.10/site-packages/torchvision/image.so: undefined symbol: _ZN3c104cuda20CUDACachingAllocator9allocatorE'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n", "  warn(\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "[2023-09-21 14:36:33,978] [INFO] [real_accelerator.py:133:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n", "Use 8 cpu cores for computing\n" ] } ], "source": [ "import argparse\n", "import logging\n", "import os\n", "import pathlib\n", "import time\n", "import tempfile\n", "import platform\n", "import webbrowser\n", "import sys\n", "import torch, torchaudio\n", "import random\n", "\n", "import numpy as np\n", "\n", "from valle.data import (\n", "    AudioTokenizer,\n", "    TextTokenizer,\n", "    tokenize_audio,\n", "    tokenize_text,\n", ")\n", "from icefall.utils import AttributeDict\n", "from valle.data.collation import get_text_token_collater\n", "from valle.models import get_model\n", "\n", "from vocos import Vocos\n", "from encodec.utils import convert_audio\n", "import multiprocessing\n", "\n", "# Use every available CPU core for torch CPU kernels.\n", "thread_count = multiprocessing.cpu_count()\n", "\n", "print(\"Use\", thread_count, \"cpu cores for computing\")\n", "\n", "torch.set_num_threads(thread_count)\n", "torch.set_num_interop_threads(thread_count)\n", "# Disable the JIT profiling executor/optimizer: this notebook only does\n", "# one-shot inference, so profiling warm-up is pure overhead.\n", "torch._C._jit_set_profiling_executor(False)\n", "torch._C._jit_set_profiling_mode(False)\n", "torch._C._set_graph_executor_optimize(False)\n", "\n", "text_tokenizer = TextTokenizer(language='ko')\n", "\n", "device = torch.device(\"cpu\")\n", "if torch.cuda.is_available():\n", "    device = torch.device(\"cuda\", 0)\n", "\n", "# Load the Korean VALL-E checkpoint on CPU first, then move the model to `device`.\n", "checkpoint = torch.load(\"./vall-e_ko_v0.pt\", map_location='cpu')\n", "model = get_model(AttributeDict(checkpoint))\n", "missing_keys, unexpected_keys = model.load_state_dict(\n", "    checkpoint[\"model\"], strict=True\n", ")\n", "assert not missing_keys\n", "model.eval()\n", "model.to(device)  # move once; a second .to(device) call is redundant\n", "text_collater = get_text_token_collater('./unique_text_tokens.k2symbols')\n", "\n", "# Encodec model\n", "audio_tokenizer = AudioTokenizer(device)\n", "\n", "# Vocos decoder\n", "vocos = Vocos.from_pretrained('charactr/vocos-encodec-24khz').to(device)\n", "\n", "@torch.no_grad()\n", "def infer_from_prompt(text_prompt, audio_prompt, text):\n", "    \"\"\"Synthesize `text` in the voice of `audio_prompt`.\n", "\n", "    Args:\n", "        text_prompt: transcript of the prompt audio (Korean).\n", "        audio_prompt: path to the prompt audio file.\n", "        text: text to synthesize (Korean).\n", "\n", "    Returns:\n", "        (message, (sample_rate, waveform)) -- a tuple directly usable as\n", "        gr.Textbox / gr.Audio outputs.\n", "    \"\"\"\n", "    ## text to token\n", "    text_tokens, text_tokens_lens = text_collater(\n", "        [\n", "            tokenize_text(\n", "                text_tokenizer, text=f\"{text_prompt} {text}\".strip()\n", "            )\n", "        ]\n", "    )\n", "    _, enroll_x_lens = text_collater(\n", "        [\n", "            tokenize_text(\n", "                text_tokenizer, text=f\"{text_prompt}\".strip()\n", "            )\n", "        ]\n", "    )\n", "    print('text_loaded')\n", "\n", "    # Encode the prompt audio to Encodec codes at the tokenizer's rate/channels.\n", "    wav_pr, sr = torchaudio.load(audio_prompt)\n", "    wav_pr = convert_audio(wav_pr, sr, audio_tokenizer.sample_rate, audio_tokenizer.channels)\n", "    audio_prompts = audio_tokenizer.encode(wav_pr.unsqueeze(0))[0][0].transpose(2, 1).to(device)\n", "    print('Audio encoded')\n", "\n", "    encoded_frames = model.inference(\n", "        text_tokens.to(device), text_tokens_lens.to(device),\n", "        audio_prompts, enroll_x_lens=enroll_x_lens,\n", "        top_k=-100, temperature=1)\n", "    # Decode the generated codes to a waveform with Vocos.\n", "    vocos_features = vocos.codes_to_features(encoded_frames.permute(2, 0, 1))\n", "    samples = vocos.decode(vocos_features, bandwidth_id=torch.tensor([2], device=device))\n", "    message = f\"synthesized text: {text}\"\n", "    return message, (24000, samples.squeeze(0).cpu().numpy())\n" ] }, { "cell_type": "markdown", "id": 
"fa6e2e1d-7522-43f0-985c-e731047acd9c", "metadata": {}, "source": [ "# Example" ] }, { "cell_type": "code", "execution_count": 2, "id": "41e40fe5-595e-4f9a-8dd7-dfda52944529", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮\n",
       " in <module>                                                                                      \n",
       "                                                                                                  \n",
       "   1 text_prompt = '' # text of the audio                                                         \n",
       "   2 audio_prompt = '' # path to the audio file                                                   \n",
       "   3 text = '' #                                                                                  \n",
       " 4 message, (sr, data) = infer_from_prompt(text_prompt, audio_prompt, text)                     \n",
       "   5                                                                                              \n",
       "                                                                                                  \n",
       " /home/dongsun/.local/lib/python3.10/site-packages/torch/autograd/grad_mode.py:27 in              \n",
       " decorate_context                                                                                 \n",
       "                                                                                                  \n",
       "    24 │   │   @functools.wraps(func)                                                             \n",
       "    25 │   │   def decorate_context(*args, **kwargs):                                             \n",
       "    26 │   │   │   with self.clone():                                                             \n",
       "  27 │   │   │   │   return func(*args, **kwargs)                                               \n",
       "    28 │   │   return cast(F, decorate_context)                                                   \n",
       "    29 │                                                                                          \n",
       "    30 │   def _wrap_generator(self, func):                                                       \n",
       "                                                                                                  \n",
       " in infer_from_prompt                                                                             \n",
       "                                                                                                  \n",
       "   64 │   ## text to token                                                                        \n",
       "   65 │   text_tokens, text_tokens_lens = text_collater(                                          \n",
       "   66 │   │   [                                                                                   \n",
       " 67 │   │   │   tokenize_text(                                                                  \n",
       "   68 │   │   │   │   text_tokenizer, text=f\"{text_prompt} {text}\".strip()                        \n",
       "   69 │   │   │   )                                                                               \n",
       "   70 │   │   ]                                                                                   \n",
       "                                                                                                  \n",
       " /home/dongsun/vall-e/valle/data/tokenizer.py:178 in tokenize_text                                \n",
       "                                                                                                  \n",
       "   175                                                                                            \n",
       "   176 def tokenize_text(tokenizer: TextTokenizer, text: str) -> List[str]:                       \n",
       "   177 │   phonemes = tokenizer([text.strip()])                                                   \n",
       " 178 return phonemes[0]  # k2symbols                                                        \n",
       "   179                                                                                            \n",
       "   180                                                                                            \n",
       "   181 def remove_encodec_weight_norm(model):                                                     \n",
       "╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\n",
       "IndexError: list index out of range\n",
       "
\n" ], "text/plain": [ "\u001b[31m╭─\u001b[0m\u001b[31m──────────────────────────────\u001b[0m\u001b[31m \u001b[0m\u001b[1;31mTraceback \u001b[0m\u001b[1;2;31m(most recent call last)\u001b[0m\u001b[31m \u001b[0m\u001b[31m───────────────────────────────\u001b[0m\u001b[31m─╮\u001b[0m\n", "\u001b[31m│\u001b[0m in \u001b[92m\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m1 \u001b[0mtext_prompt = \u001b[33m'\u001b[0m\u001b[33m'\u001b[0m \u001b[2m# text of the audio \u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m2 \u001b[0maudio_prompt = \u001b[33m'\u001b[0m\u001b[33m'\u001b[0m \u001b[2m# path to the audio file\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m3 \u001b[0mtext = \u001b[33m'\u001b[0m\u001b[33m'\u001b[0m \u001b[2m# \u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m4 message, (sr, data) = infer_from_prompt(text_prompt, audio_prompt, text) \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m5 \u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2;33m/home/dongsun/.local/lib/python3.10/site-packages/torch/autograd/\u001b[0m\u001b[1;33mgrad_mode.py\u001b[0m:\u001b[94m27\u001b[0m in \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[92mdecorate_context\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m 24 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[1;95m@functools\u001b[0m.wraps(func) \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m 25 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mdef\u001b[0m \u001b[92mdecorate_context\u001b[0m(*args, **kwargs): \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m 26 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[94mwith\u001b[0m \u001b[96mself\u001b[0m.clone(): \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m 27 \u001b[2m│ │ │ │ \u001b[0m\u001b[94mreturn\u001b[0m 
func(*args, **kwargs) \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m 28 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mreturn\u001b[0m cast(F, decorate_context) \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m 29 \u001b[0m\u001b[2m│ \u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m 30 \u001b[0m\u001b[2m│ \u001b[0m\u001b[94mdef\u001b[0m \u001b[92m_wrap_generator\u001b[0m(\u001b[96mself\u001b[0m, func): \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m in \u001b[92minfer_from_prompt\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m64 \u001b[0m\u001b[2m│ \u001b[0m\u001b[2m## text to token\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m65 \u001b[0m\u001b[2m│ \u001b[0mtext_tokens, text_tokens_lens = text_collater( \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m66 \u001b[0m\u001b[2m│ │ \u001b[0m[ \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m67 \u001b[2m│ │ │ \u001b[0mtokenize_text( \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m68 \u001b[0m\u001b[2m│ │ │ │ \u001b[0mtext_tokenizer, text=\u001b[33mf\u001b[0m\u001b[33m\"\u001b[0m\u001b[33m{\u001b[0mtext_prompt\u001b[33m}\u001b[0m\u001b[33m \u001b[0m\u001b[33m{\u001b[0mtext\u001b[33m}\u001b[0m\u001b[33m\"\u001b[0m.strip() \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m69 \u001b[0m\u001b[2m│ │ │ \u001b[0m) \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m70 \u001b[0m\u001b[2m│ │ \u001b[0m] \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2;33m/home/dongsun/vall-e/valle/data/\u001b[0m\u001b[1;33mtokenizer.py\u001b[0m:\u001b[94m178\u001b[0m in \u001b[92mtokenize_text\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m175 \u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m176 
\u001b[0m\u001b[94mdef\u001b[0m \u001b[92mtokenize_text\u001b[0m(tokenizer: TextTokenizer, text: \u001b[96mstr\u001b[0m) -> List[\u001b[96mstr\u001b[0m]: \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m177 \u001b[0m\u001b[2m│ \u001b[0mphonemes = tokenizer([text.strip()]) \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m178 \u001b[2m│ \u001b[0m\u001b[94mreturn\u001b[0m phonemes[\u001b[94m0\u001b[0m] \u001b[2m# k2symbols\u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m179 \u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m180 \u001b[0m \u001b[31m│\u001b[0m\n", "\u001b[31m│\u001b[0m \u001b[2m181 \u001b[0m\u001b[94mdef\u001b[0m \u001b[92mremove_encodec_weight_norm\u001b[0m(model): \u001b[31m│\u001b[0m\n", "\u001b[31m╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\u001b[0m\n", "\u001b[1;91mIndexError: \u001b[0mlist index out of range\n" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "text_prompt = '' # text of the audio \n", "audio_prompt = '' # path to the audio file\n", "text = '' # \n", "message, (sr, data) = infer_from_prompt(text_prompt, audio_prompt, text)" ] }, { "cell_type": "code", "execution_count": null, "id": "1f97f088-74a4-4cbb-a18b-d884adf81546", "metadata": {}, "outputs": [], "source": [ "print(message)\n", "from IPython.display import Audio\n", "Audio(data, rate=sr)" ] }, { "cell_type": "markdown", "id": "1cedb3cc-7486-4a3d-9dcd-1facffdb78ad", "metadata": {}, "source": [ "# Simple Gradio App" ] }, { "cell_type": "code", "execution_count": 3, "id": "723c13c7-36f5-4af6-bc0b-bbf6d65c2e3a", "metadata": { "collapsed": true, "jupyter": { "outputs_hidden": true }, "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Defaulting to user installation because normal site-packages is not writeable\n", "\u001b[33mWARNING: Ignoring invalid distribution -orch 
(/home/dongsun/.local/lib/python3.10/site-packages)\u001b[0m\u001b[33m\n", "\u001b[0m\u001b[33mWARNING: Ignoring invalid distribution -orch (/home/dongsun/.local/lib/python3.10/site-packages)\u001b[0m\u001b[33m\n", "\u001b[0mRequirement already satisfied: gradio in /home/dongsun/.local/lib/python3.10/site-packages (3.32.0)\n", "Requirement already satisfied: markdown-it-py[linkify]>=2.0.0 in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (2.1.0)\n", "Requirement already satisfied: semantic-version in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (2.10.0)\n", "Requirement already satisfied: pandas in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (2.0.3)\n", "Requirement already satisfied: uvicorn>=0.14.0 in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (0.19.0)\n", "Requirement already satisfied: mdit-py-plugins<=0.3.3 in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (0.3.1)\n", "Requirement already satisfied: httpx in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (0.23.0)\n", "Requirement already satisfied: orjson in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (3.8.0)\n", "Requirement already satisfied: ffmpy in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (0.3.0)\n", "Requirement already satisfied: pygments>=2.12.0 in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (2.14.0)\n", "Requirement already satisfied: pillow in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (9.5.0)\n", "Requirement already satisfied: numpy in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (1.23.0)\n", "Requirement already satisfied: python-multipart in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (0.0.5)\n", "Requirement already satisfied: markupsafe in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (2.1.0)\n", "Requirement already satisfied: pydantic in 
/home/dongsun/.local/lib/python3.10/site-packages (from gradio) (1.8.2)\n", "Requirement already satisfied: aiohttp in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (3.8.1)\n", "Requirement already satisfied: websockets>=10.0 in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (10.3)\n", "Requirement already satisfied: typing-extensions in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (4.5.0)\n", "Requirement already satisfied: gradio-client>=0.2.4 in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (0.2.5)\n", "Requirement already satisfied: matplotlib in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (3.7.0)\n", "Requirement already satisfied: huggingface-hub>=0.13.0 in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (0.15.1)\n", "Requirement already satisfied: pydub in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (0.25.1)\n", "Requirement already satisfied: fastapi in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (0.94.0)\n", "Requirement already satisfied: requests in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (2.31.0)\n", "Requirement already satisfied: jinja2 in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (3.0.3)\n", "Requirement already satisfied: pyyaml in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (6.0)\n", "Requirement already satisfied: aiofiles in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (23.1.0)\n", "Requirement already satisfied: altair>=4.2.0 in /home/dongsun/.local/lib/python3.10/site-packages (from gradio) (4.2.2)\n", "Requirement already satisfied: jsonschema>=3.0 in /home/dongsun/.local/lib/python3.10/site-packages (from altair>=4.2.0->gradio) (4.4.0)\n", "Requirement already satisfied: entrypoints in /home/dongsun/.local/lib/python3.10/site-packages (from altair>=4.2.0->gradio) (0.4)\n", "Requirement already satisfied: toolz in 
/home/dongsun/.local/lib/python3.10/site-packages (from altair>=4.2.0->gradio) (0.11.2)\n", "Requirement already satisfied: fsspec in /home/dongsun/.local/lib/python3.10/site-packages (from gradio-client>=0.2.4->gradio) (2022.3.0)\n", "Requirement already satisfied: packaging in /home/dongsun/.local/lib/python3.10/site-packages (from gradio-client>=0.2.4->gradio) (23.1)\n", "Requirement already satisfied: filelock in /home/dongsun/.local/lib/python3.10/site-packages (from huggingface-hub>=0.13.0->gradio) (3.11.0)\n", "Requirement already satisfied: tqdm>=4.42.1 in /home/dongsun/.local/lib/python3.10/site-packages (from huggingface-hub>=0.13.0->gradio) (4.65.0)\n", "Requirement already satisfied: mdurl~=0.1 in /home/dongsun/.local/lib/python3.10/site-packages (from markdown-it-py[linkify]>=2.0.0->gradio) (0.1.2)\n", "Requirement already satisfied: linkify-it-py~=1.0 in /home/dongsun/.local/lib/python3.10/site-packages (from markdown-it-py[linkify]>=2.0.0->gradio) (1.0.3)\n", "Requirement already satisfied: pytz>=2020.1 in /usr/lib/python3.10/site-packages (from pandas->gradio) (2022.7)\n", "Requirement already satisfied: tzdata>=2022.1 in /home/dongsun/.local/lib/python3.10/site-packages (from pandas->gradio) (2022.7)\n", "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/lib/python3.10/site-packages (from pandas->gradio) (2.8.2)\n", "Requirement already satisfied: h11>=0.8 in /home/dongsun/.local/lib/python3.10/site-packages (from uvicorn>=0.14.0->gradio) (0.12.0)\n", "Requirement already satisfied: click>=7.0 in /usr/lib/python3.10/site-packages (from uvicorn>=0.14.0->gradio) (8.1.3)\n", "Requirement already satisfied: frozenlist>=1.1.1 in /home/dongsun/.local/lib/python3.10/site-packages (from aiohttp->gradio) (1.3.0)\n", "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /home/dongsun/.local/lib/python3.10/site-packages (from aiohttp->gradio) (4.0.2)\n", "Requirement already satisfied: charset-normalizer<3.0,>=2.0 in 
/home/dongsun/.local/lib/python3.10/site-packages (from aiohttp->gradio) (2.1.1)\n", "Requirement already satisfied: attrs>=17.3.0 in /home/dongsun/.local/lib/python3.10/site-packages (from aiohttp->gradio) (21.4.0)\n", "Requirement already satisfied: yarl<2.0,>=1.0 in /home/dongsun/.local/lib/python3.10/site-packages (from aiohttp->gradio) (1.7.2)\n", "Requirement already satisfied: multidict<7.0,>=4.5 in /home/dongsun/.local/lib/python3.10/site-packages (from aiohttp->gradio) (6.0.2)\n", "Requirement already satisfied: aiosignal>=1.1.2 in /home/dongsun/.local/lib/python3.10/site-packages (from aiohttp->gradio) (1.2.0)\n", "Requirement already satisfied: starlette<0.27.0,>=0.26.0 in /home/dongsun/.local/lib/python3.10/site-packages (from fastapi->gradio) (0.26.1)\n", "Requirement already satisfied: certifi in /home/dongsun/.local/lib/python3.10/site-packages (from httpx->gradio) (2022.12.7)\n", "Requirement already satisfied: sniffio in /home/dongsun/.local/lib/python3.10/site-packages (from httpx->gradio) (1.2.0)\n", "Requirement already satisfied: rfc3986[idna2008]<2,>=1.3 in /home/dongsun/.local/lib/python3.10/site-packages (from httpx->gradio) (1.5.0)\n", "Requirement already satisfied: httpcore<0.16.0,>=0.15.0 in /home/dongsun/.local/lib/python3.10/site-packages (from httpx->gradio) (0.15.0)\n", "Requirement already satisfied: kiwisolver>=1.0.1 in /home/dongsun/.local/lib/python3.10/site-packages (from matplotlib->gradio) (1.3.2)\n", "Requirement already satisfied: fonttools>=4.22.0 in /home/dongsun/.local/lib/python3.10/site-packages (from matplotlib->gradio) (4.29.1)\n", "Requirement already satisfied: contourpy>=1.0.1 in /home/dongsun/.local/lib/python3.10/site-packages (from matplotlib->gradio) (1.0.7)\n", "Requirement already satisfied: pyparsing>=2.3.1 in /usr/lib/python3.10/site-packages (from matplotlib->gradio) (3.0.9)\n", "Requirement already satisfied: cycler>=0.10 in /home/dongsun/.local/lib/python3.10/site-packages (from matplotlib->gradio) 
(0.11.0)\n", "Requirement already satisfied: six>=1.4.0 in /usr/lib/python3.10/site-packages (from python-multipart->gradio) (1.16.0)\n", "Requirement already satisfied: idna<4,>=2.5 in /home/dongsun/.local/lib/python3.10/site-packages (from requests->gradio) (2.10)\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /home/dongsun/.local/lib/python3.10/site-packages (from requests->gradio) (1.26.15)\n", "Requirement already satisfied: anyio==3.* in /home/dongsun/.local/lib/python3.10/site-packages (from httpcore<0.16.0,>=0.15.0->httpx->gradio) (3.5.0)\n", "Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /home/dongsun/.local/lib/python3.10/site-packages (from jsonschema>=3.0->altair>=4.2.0->gradio) (0.18.1)\n", "Requirement already satisfied: uc-micro-py in /home/dongsun/.local/lib/python3.10/site-packages (from linkify-it-py~=1.0->markdown-it-py[linkify]>=2.0.0->gradio) (1.0.1)\n", "\u001b[33mWARNING: Ignoring invalid distribution -orch (/home/dongsun/.local/lib/python3.10/site-packages)\u001b[0m\u001b[33m\n", "\u001b[0m\u001b[33mWARNING: Ignoring invalid distribution -orch (/home/dongsun/.local/lib/python3.10/site-packages)\u001b[0m\u001b[33m\n", "\u001b[0m\u001b[33mWARNING: Ignoring invalid distribution -orch (/home/dongsun/.local/lib/python3.10/site-packages)\u001b[0m\u001b[33m\n", "\u001b[0m\u001b[33mWARNING: Ignoring invalid distribution -orch (/home/dongsun/.local/lib/python3.10/site-packages)\u001b[0m\u001b[33m\n", "\u001b[0m\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.0.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.2.1\u001b[0m\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n" ] } ], "source": [ "!pip install gradio" ] }, { "cell_type": "code", "execution_count": 4, "id": 
"6b59cf2b-2826-40be-a27f-e6dbfe0cc1c0", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7860\n", "Running on public URL: https://b3512daf295a0b63b1.gradio.live\n", "\n", "This share link expires in 72 hours. For free permanent hosting and GPU upgrades (NEW!), check out Spaces: https://huggingface.co/spaces\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" }, { "name": "stdout", "output_type": "stream", "text": [ "text_loaded\n", "Audio encoded\n", "VALL-E EOS [356 -> 899]\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/home/dongsun/.local/lib/python3.10/site-packages/gradio/processing_utils.py:171: UserWarning: Trying to convert audio automatically from float32 to 16-bit int format.\n", "  warnings.warn(warning.format(data.dtype))\n" ] } ], "source": [ "import gradio as gr\n", "\n", "# Minimal Blocks UI around infer_from_prompt (defined in the setup cell above).\n", "app = gr.Blocks(title=\"VALL-E Korean\")\n", "with app:\n", "    #gr.Markdown(top_md)\n", "    with gr.Tab(\"VALL-E Korean Demo\"):\n", "        #gr.Markdown(infer_from_prompt_md)\n", "        with gr.Row():\n", "            with gr.Column():\n", "                text_prompt = gr.TextArea(label=\"Input Text\",\n", "                                          placeholder=\"Type text in the audio file (Korean)\",)\n", "                audio_prompt = gr.Audio(label=\"Input Audio\", source='upload', interactive=True, type=\"filepath\")\n", "                text_input = gr.TextArea(label=\"Output Text\",\n", "                                         placeholder=\"Type text you want to generate (Korean)\",)\n", "            with gr.Column():\n", "                text_output = gr.Textbox(label=\"Message\")\n", "                audio_output = gr.Audio(label=\"Output Audio\")\n", "        btn = gr.Button(\"Generate!\")\n", "        btn.click(infer_from_prompt,\n", "                  inputs=[text_prompt, audio_prompt, text_input],\n", "                  outputs=[text_output, audio_output])\n", "# Let Gradio open the browser itself once the server is actually up;\n", "# calling webbrowser.open() before launch() races the server startup.\n", "app.launch(share=True, inbrowser=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "fafc648b-2165-45a1-b422-38ced5f4d8fa", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", 
"pygments_lexer": "ipython3", "version": "3.10.9" } }, "nbformat": 4, "nbformat_minor": 5 }