{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import sys\n",
    "\n",
    "# 设置路径\n",
    "current_dir = os.getcwd()\n",
    "matcha_path = os.path.join(current_dir, 'third_party/Matcha-TTS')\n",
    "if matcha_path not in sys.path:\n",
    "    sys.path.append(matcha_path)\n",
    "    print(f\"Added Matcha-TTS path: {matcha_path}\")\n",
    "\n",
    "import argparse\n",
    "import gradio as gr\n",
    "import numpy as np\n",
    "import torch\n",
    "import torchaudio\n",
    "import random\n",
    "import librosa\n",
    "from cosyvoice.cli.cosyvoice import CosyVoice\n",
    "from cosyvoice.utils.file_utils import load_wav, logging\n",
    "from cosyvoice.utils.common import set_all_random_seed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global configuration\n",
    "inference_mode_list = ['预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制']\n",
    "# Per-mode usage instructions displayed in the UI (keys must match inference_mode_list).\n",
    "instruct_dict = {\n",
    "    '预训练音色': '1. 选择预训练音色\\n2. 点击生成音频按钮',\n",
    "    '3s极速复刻': '1. 选择prompt音频文件，或录入prompt音频，注意不超过30s，若同时提供，优先选择prompt音频文件\\n2. 输入prompt文本\\n3. 点击生成音频按钮',\n",
    "    '跨语种复刻': '1. 选择prompt音频文件，或录入prompt音频，注意不超过30s，若同时提供，优先选择prompt音频文件\\n2. 点击生成音频按钮',\n",
    "    '自然语言控制': '1. 选择预训练音色\\n2. 输入instruct文本\\n3. 点击生成音频按钮'\n",
    "}\n",
    "stream_mode_list = [('否', False), ('是', True)]  # (label, value) pairs for the streaming radio\n",
    "max_val = 0.8  # peak-normalization ceiling applied in postprocess()\n",
    "prompt_sr, target_sr = 16000, 22050  # prompt input sample rate / synthesis output sample rate (Hz)\n",
    "default_data = np.zeros(target_sr)  # one second of silence, yielded when request validation fails\n",
    "\n",
    "# Initialize the model\n",
    "model_dir = 'pretrained_models/CosyVoice-300M'  # change as needed\n",
    "cosyvoice = CosyVoice(model_dir)\n",
    "# NOTE: 'avaliable' (sic) matches the actual CosyVoice API method name.\n",
    "sft_spk = cosyvoice.list_avaliable_spks()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_seed():\n",
    "    \"\"\"Return a Gradio update dict carrying a fresh random seed.\"\"\"\n",
    "    seed = random.randint(1, 100000000)\n",
    "    return {\n",
    "        \"__type__\": \"update\",\n",
    "        \"value\": seed\n",
    "    }\n",
    "\n",
    "def postprocess(speech, top_db=60, hop_length=220, win_length=440):\n",
    "    \"\"\"Trim silence, peak-normalize to max_val, and append 0.2s of padding.\n",
    "\n",
    "    Assumes `speech` is a (1, T) torch tensor as returned by load_wav --\n",
    "    TODO confirm against cosyvoice.utils.file_utils.load_wav.\n",
    "    \"\"\"\n",
    "    # Strip leading/trailing silence below `top_db` dB relative to peak.\n",
    "    speech, _ = librosa.effects.trim(\n",
    "        speech, top_db=top_db,\n",
    "        frame_length=win_length,\n",
    "        hop_length=hop_length\n",
    "    )\n",
    "    # Peak-normalize so the loudest sample never exceeds max_val.\n",
    "    if speech.abs().max() > max_val:\n",
    "        speech = speech / speech.abs().max() * max_val\n",
    "    # Append 0.2s of silence at target_sr so playback does not end abruptly.\n",
    "    speech = torch.concat([speech, torch.zeros(1, int(target_sr * 0.2))], dim=1)\n",
    "    return speech\n",
    "\n",
    "def change_instruction(mode_checkbox_group):\n",
    "    \"\"\"Return the usage instructions for the selected inference mode.\"\"\"\n",
    "    return instruct_dict[mode_checkbox_group]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,\n",
    "                   seed, stream, speed):\n",
    "    \"\"\"Validate the request for the selected inference mode and stream audio.\n",
    "\n",
    "    Yields (target_sr, np.ndarray) tuples. On a validation failure one second\n",
    "    of silence is yielded and the generator returns early, so later checks\n",
    "    never run on invalid input (previously prompt_wav=None could still reach\n",
    "    torchaudio.info and crash).\n",
    "    \"\"\"\n",
    "    # Uploaded prompt audio takes precedence over a microphone recording.\n",
    "    if prompt_wav_upload is not None:\n",
    "        prompt_wav = prompt_wav_upload\n",
    "    elif prompt_wav_record is not None:\n",
    "        prompt_wav = prompt_wav_record\n",
    "    else:\n",
    "        prompt_wav = None\n",
    "    # Instruct mode: requires the iic/CosyVoice-300M-Instruct model and is not cross-lingual.\n",
    "    if mode_checkbox_group in ['自然语言控制']:\n",
    "        if cosyvoice.frontend.instruct is False:\n",
    "            # Fix: this notebook has no argparse `args`; use the global model_dir.\n",
    "            gr.Warning('您正在使用自然语言控制模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M-Instruct模型'.format(model_dir))\n",
    "            yield (target_sr, default_data)\n",
    "            return\n",
    "        if instruct_text == '':\n",
    "            gr.Warning('您正在使用自然语言控制模式, 请输入instruct文本')\n",
    "            yield (target_sr, default_data)\n",
    "            return\n",
    "        if prompt_wav is not None or prompt_text != '':\n",
    "            gr.Info('您正在使用自然语言控制模式, prompt音频/prompt文本会被忽略')\n",
    "    # Cross-lingual mode: requires iic/CosyVoice-300M; tts_text and prompt_text should be different languages.\n",
    "    if mode_checkbox_group in ['跨语种复刻']:\n",
    "        if cosyvoice.frontend.instruct is True:\n",
    "            gr.Warning('您正在使用跨语种复刻模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M模型'.format(model_dir))\n",
    "            yield (target_sr, default_data)\n",
    "            return\n",
    "        if instruct_text != '':\n",
    "            gr.Info('您正在使用跨语种复刻模式, instruct文本会被忽略')\n",
    "        if prompt_wav is None:\n",
    "            gr.Warning('您正在使用跨语种复刻模式, 请提供prompt音频')\n",
    "            yield (target_sr, default_data)\n",
    "            return\n",
    "        gr.Info('您正在使用跨语种复刻模式, 请确保合成文本和prompt文本为不同语言')\n",
    "    # Zero-shot / cross-lingual: prompt_wav must exist and meet the sample-rate floor.\n",
    "    if mode_checkbox_group in ['3s极速复刻', '跨语种复刻']:\n",
    "        if prompt_wav is None:\n",
    "            gr.Warning('prompt音频为空，您是否忘记输入prompt音频？')\n",
    "            yield (target_sr, default_data)\n",
    "            return\n",
    "        if torchaudio.info(prompt_wav).sample_rate < prompt_sr:\n",
    "            gr.Warning('prompt音频采样率{}低于{}'.format(torchaudio.info(prompt_wav).sample_rate, prompt_sr))\n",
    "            yield (target_sr, default_data)\n",
    "            return\n",
    "    # SFT mode only uses sft_dropdown.\n",
    "    if mode_checkbox_group in ['预训练音色']:\n",
    "        if instruct_text != '' or prompt_wav is not None or prompt_text != '':\n",
    "            gr.Info('您正在使用预训练音色模式，prompt文本/prompt音频/instruct文本会被忽略！')\n",
    "    # Zero-shot mode only uses prompt_wav and prompt_text.\n",
    "    if mode_checkbox_group in ['3s极速复刻']:\n",
    "        if prompt_text == '':\n",
    "            gr.Warning('prompt文本为空，您是否忘记输入prompt文本？')\n",
    "            yield (target_sr, default_data)\n",
    "            return\n",
    "        if instruct_text != '':\n",
    "            gr.Info('您正在使用3s极速复刻模式，预训练音色/instruct文本会被忽略！')\n",
    "\n",
    "    if mode_checkbox_group == '预训练音色':\n",
    "        logging.info('get sft inference request')\n",
    "        set_all_random_seed(seed)\n",
    "        for i in cosyvoice.inference_sft(tts_text, sft_dropdown, stream=stream, speed=speed):\n",
    "            yield (target_sr, i['tts_speech'].numpy().flatten())\n",
    "    elif mode_checkbox_group == '3s极速复刻':\n",
    "        logging.info('get zero_shot inference request')\n",
    "        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))\n",
    "        set_all_random_seed(seed)\n",
    "        for i in cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k, stream=stream, speed=speed):\n",
    "            yield (target_sr, i['tts_speech'].numpy().flatten())\n",
    "    elif mode_checkbox_group == '跨语种复刻':\n",
    "        logging.info('get cross_lingual inference request')\n",
    "        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))\n",
    "        set_all_random_seed(seed)\n",
    "        for i in cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=stream, speed=speed):\n",
    "            yield (target_sr, i['tts_speech'].numpy().flatten())\n",
    "    else:\n",
    "        logging.info('get instruct inference request')\n",
    "        set_all_random_seed(seed)\n",
    "        for i in cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text, stream=stream, speed=speed):\n",
    "            yield (target_sr, i['tts_speech'].numpy().flatten())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def batch_generate_audio(text_list, mode_checkbox_group, sft_dropdown, prompt_text,\n",
    "                        prompt_wav_upload, prompt_wav_record, instruct_text, seed, stream, speed):\n",
    "    \"\"\"Batch-generate audio for up to 5 uploaded text files.\n",
    "\n",
    "    Returns 5 (sample_rate, data) audio slots followed by 5 filename strings,\n",
    "    matching the order of the Gradio `outputs` wiring.\n",
    "    \"\"\"\n",
    "    if not text_list:\n",
    "        gr.Warning(\"请先上传文本文件！\")\n",
    "        # Empty values for the 5 audio slots and the 5 filename slots.\n",
    "        return [None] * 5 + [\"\"] * 5\n",
    "\n",
    "    audio_outputs = [None] * 5  # 5 audio slots\n",
    "    filename_outputs = [\"\"] * 5  # 5 filename slots\n",
    "\n",
    "    try:\n",
    "        for idx, text_item in enumerate(text_list):\n",
    "            if idx >= 5:  # process at most 5 files\n",
    "                gr.Warning(f\"已达到最大处理数量(5个文件)，剩余文件将被忽略\")\n",
    "                break\n",
    "\n",
    "            content = text_item[\"content\"]\n",
    "            filename = text_item[\"filename\"]\n",
    "\n",
    "            # Reuse the single-file generator for each text file.\n",
    "            audio_generator = generate_audio(content, mode_checkbox_group, sft_dropdown, prompt_text,\n",
    "                                          prompt_wav_upload, prompt_wav_record, instruct_text,\n",
    "                                          seed, stream, speed)\n",
    "\n",
    "            # Keep only the first yielded result (the whole clip in non-streaming mode).\n",
    "            # NOTE(review): with stream=True this drops every chunk after the first.\n",
    "            for audio in audio_generator:\n",
    "                sample_rate, audio_data = audio\n",
    "                audio_outputs[idx] = (sample_rate, audio_data)\n",
    "                # Fix: report the actual source filename (was a constant placeholder).\n",
    "                filename_outputs[idx] = f\"文件: {filename}\"\n",
    "                break\n",
    "\n",
    "        # Return all results (5 audios + 5 filenames).\n",
    "        return audio_outputs + filename_outputs\n",
    "\n",
    "    except Exception as e:\n",
    "        gr.Warning(f\"生成音频时出错: {str(e)}\")\n",
    "        return [None] * 5 + [\"\"] * 5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Refresh the batch-output tab titles after a generation run.\n",
    "def update_tab_titles(*results):\n",
    "    \"\"\"Map 5 audio results followed by 5 filenames onto label updates for the 5 tabs.\"\"\"\n",
    "    audio_results = results[:5]\n",
    "    filename_results = results[5:]\n",
    "    labels = []\n",
    "    for i, (audio, filename) in enumerate(zip(audio_results, filename_results)):\n",
    "        # A tab shows its source filename once it holds audio; otherwise the default title.\n",
    "        labels.append(filename if (audio is not None and filename) else f\"音频 {i+1}\")\n",
    "    return [gr.update(label=text) for text in labels]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-11-20 11:04:28,004 DEBUG load_ssl_context verify=True cert=None trust_env=True http2=False\n",
      "2024-11-20 11:04:28,006 DEBUG Starting new HTTPS connection (1): huggingface.co:443\n",
      "2024-11-20 11:04:28,007 DEBUG load_verify_locations cafile='/home/ad/miniconda3/envs/cosyvoice_env/lib/python3.8/site-packages/certifi/cacert.pem'\n",
      "/home/ad/miniconda3/envs/cosyvoice_env/lib/python3.8/site-packages/gradio/components/base.py:193: UserWarning: 'scale' value should be an integer. Using 0.5 will cause issues.\n",
      "  warnings.warn(\n",
      "/home/ad/miniconda3/envs/cosyvoice_env/lib/python3.8/site-packages/gradio/components/base.py:193: UserWarning: 'scale' value should be an integer. Using 0.25 will cause issues.\n",
      "  warnings.warn(\n",
      "/home/ad/miniconda3/envs/cosyvoice_env/lib/python3.8/site-packages/gradio/layouts/column.py:55: UserWarning: 'scale' value should be an integer. Using 0.25 will cause issues.\n",
      "  warnings.warn(\n",
      "2024-11-20 11:04:28,077 DEBUG connect_tcp.started host='api.gradio.app' port=443 local_address=None timeout=3 socket_options=None\n",
      "2024-11-20 11:04:28,116 DEBUG load_ssl_context verify=True cert=None trust_env=True http2=False\n",
      "2024-11-20 11:04:28,116 DEBUG load_verify_locations cafile='/home/ad/miniconda3/envs/cosyvoice_env/lib/python3.8/site-packages/certifi/cacert.pem'\n",
      "2024-11-20 11:04:28,120 DEBUG connect_tcp.started host='127.0.0.1' port=8000 local_address=None timeout=None socket_options=None\n",
      "2024-11-20 11:04:28,120 DEBUG connect_tcp.complete return_value=<httpcore._backends.sync.SyncStream object at 0x7f7a04942ac0>\n",
      "2024-11-20 11:04:28,120 DEBUG send_request_headers.started request=<Request [b'GET']>\n",
      "2024-11-20 11:04:28,121 DEBUG send_request_headers.complete\n",
      "2024-11-20 11:04:28,121 DEBUG send_request_body.started request=<Request [b'GET']>\n",
      "2024-11-20 11:04:28,121 DEBUG send_request_body.complete\n",
      "2024-11-20 11:04:28,121 DEBUG receive_response_headers.started request=<Request [b'GET']>\n",
      "2024-11-20 11:04:28,122 DEBUG receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'date', b'Wed, 20 Nov 2024 03:04:28 GMT'), (b'server', b'uvicorn'), (b'content-length', b'4'), (b'content-type', b'application/json')])\n",
      "2024-11-20 11:04:28,122 INFO HTTP Request: GET http://127.0.0.1:8000/startup-events \"HTTP/1.1 200 OK\"\n",
      "2024-11-20 11:04:28,122 DEBUG receive_response_body.started request=<Request [b'GET']>\n",
      "2024-11-20 11:04:28,123 DEBUG receive_response_body.complete\n",
      "2024-11-20 11:04:28,123 DEBUG response_closed.started\n",
      "2024-11-20 11:04:28,123 DEBUG response_closed.complete\n",
      "2024-11-20 11:04:28,123 DEBUG close.started\n",
      "2024-11-20 11:04:28,123 DEBUG close.complete\n",
      "2024-11-20 11:04:28,124 DEBUG load_ssl_context verify=False cert=None trust_env=True http2=False\n",
      "2024-11-20 11:04:28,124 DEBUG connect_tcp.started host='127.0.0.1' port=8000 local_address=None timeout=3 socket_options=None\n",
      "2024-11-20 11:04:28,125 DEBUG connect_tcp.complete return_value=<httpcore._backends.sync.SyncStream object at 0x7f7a04945ac0>\n",
      "2024-11-20 11:04:28,125 DEBUG send_request_headers.started request=<Request [b'HEAD']>\n",
      "2024-11-20 11:04:28,126 DEBUG send_request_headers.complete\n",
      "2024-11-20 11:04:28,126 DEBUG send_request_body.started request=<Request [b'HEAD']>\n",
      "2024-11-20 11:04:28,127 DEBUG send_request_body.complete\n",
      "2024-11-20 11:04:28,129 DEBUG receive_response_headers.started request=<Request [b'HEAD']>\n",
      "2024-11-20 11:04:28,130 DEBUG receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'date', b'Wed, 20 Nov 2024 03:04:28 GMT'), (b'server', b'uvicorn'), (b'content-length', b'61551'), (b'content-type', b'text/html; charset=utf-8')])\n",
      "2024-11-20 11:04:28,130 INFO HTTP Request: HEAD http://127.0.0.1:8000/ \"HTTP/1.1 200 OK\"\n",
      "2024-11-20 11:04:28,130 DEBUG receive_response_body.started request=<Request [b'HEAD']>\n",
      "2024-11-20 11:04:28,130 DEBUG receive_response_body.complete\n",
      "2024-11-20 11:04:28,130 DEBUG response_closed.started\n",
      "2024-11-20 11:04:28,130 DEBUG response_closed.complete\n",
      "2024-11-20 11:04:28,131 DEBUG close.started\n",
      "2024-11-20 11:04:28,131 DEBUG close.complete\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Running on local URL:  http://127.0.0.1:8000\n",
      "\n",
      "To create a public link, set `share=True` in `launch()`.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div><iframe src=\"http://127.0.0.1:8000/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-11-20 11:04:28,135 DEBUG Starting new HTTPS connection (3): huggingface.co:443\n"
     ]
    },
    {
     "data": {
      "text/plain": []
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-11-20 11:04:28,281 DEBUG connect_tcp.complete return_value=<httpcore._backends.sync.SyncStream object at 0x7f7a04a23100>\n",
      "2024-11-20 11:04:28,281 DEBUG start_tls.started ssl_context=<ssl.SSLContext object at 0x7f78fd72a0c0> server_hostname='api.gradio.app' timeout=3\n",
      "2024-11-20 11:04:28,646 DEBUG start_tls.complete return_value=<httpcore._backends.sync.SyncStream object at 0x7f7a04e930d0>\n",
      "2024-11-20 11:04:28,646 DEBUG send_request_headers.started request=<Request [b'GET']>\n",
      "2024-11-20 11:04:28,647 DEBUG send_request_headers.complete\n",
      "2024-11-20 11:04:28,647 DEBUG send_request_body.started request=<Request [b'GET']>\n",
      "2024-11-20 11:04:28,648 DEBUG send_request_body.complete\n",
      "2024-11-20 11:04:28,648 DEBUG receive_response_headers.started request=<Request [b'GET']>\n",
      "2024-11-20 11:04:28,830 DEBUG receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Wed, 20 Nov 2024 03:04:28 GMT'), (b'Content-Type', b'application/json'), (b'Content-Length', b'21'), (b'Connection', b'keep-alive'), (b'Server', b'nginx/1.18.0'), (b'Access-Control-Allow-Origin', b'*')])\n",
      "2024-11-20 11:04:28,831 INFO HTTP Request: GET https://api.gradio.app/pkg-version \"HTTP/1.1 200 OK\"\n",
      "2024-11-20 11:04:28,831 DEBUG receive_response_body.started request=<Request [b'GET']>\n",
      "2024-11-20 11:04:28,832 DEBUG receive_response_body.complete\n",
      "2024-11-20 11:04:28,832 DEBUG response_closed.started\n",
      "2024-11-20 11:04:28,832 DEBUG response_closed.complete\n",
      "2024-11-20 11:04:28,832 DEBUG close.started\n",
      "2024-11-20 11:04:28,833 DEBUG close.complete\n",
      "2024-11-20 11:04:39,994 INFO get sft inference request\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tn 我是通义实验室语音团队全新推出的生成式语音大模型，提供舒适自然的语音合成能力。 to 我是通义实验室语音团队全新推出的生成式语音大模型，提供舒适自然的语音合成能力。\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/1 [00:00<?, ?it/s]2024-11-20 11:04:40,033 INFO synthesis text 我是通义实验室语音团队全新推出的生成式语音大模型，提供舒适自然的语音合成能力。\n",
      "2024-11-20 11:04:42,168 INFO yield speech len 7.453605442176871, rtf 0.2863396855645648\n",
      "/home/ad/miniconda3/envs/cosyvoice_env/lib/python3.8/site-packages/gradio/processing_utils.py:574: UserWarning: Trying to convert audio automatically from float32 to 16-bit int format.\n",
      "  warnings.warn(warning.format(data.dtype))\n",
      "100%|██████████| 1/1 [00:02<00:00,  2.15s/it]\n",
      "2024-11-20 11:06:36,266 INFO get sft inference request\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tn 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期 to 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/1 [00:00<?, ?it/s]2024-11-20 11:06:36,320 INFO synthesis text 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期。\n",
      "2024-11-20 11:06:38,742 INFO yield speech len 8.335963718820862, rtf 0.29059364278813754\n",
      "100%|██████████| 1/1 [00:02<00:00,  2.43s/it]\n",
      "2024-11-20 11:06:51,148 INFO get sft inference request\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tn 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期 to 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/1 [00:00<?, ?it/s]2024-11-20 11:06:51,179 INFO synthesis text 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期。\n",
      "2024-11-20 11:06:54,002 INFO yield speech len 10.135510204081633, rtf 0.2785665712789776\n",
      "100%|██████████| 1/1 [00:02<00:00,  2.83s/it]\n",
      "2024-11-20 11:07:06,128 INFO get sft inference request\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tn 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期 to 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/1 [00:00<?, ?it/s]2024-11-20 11:07:06,158 INFO synthesis text 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期。\n",
      "2024-11-20 11:07:09,678 INFO yield speech len 11.215238095238096, rtf 0.3138340554078636\n",
      "100%|██████████| 1/1 [00:03<00:00,  3.52s/it]\n",
      "2024-11-20 11:07:24,104 INFO get sft inference request\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tn 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期 to 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/1 [00:00<?, ?it/s]2024-11-20 11:07:24,134 INFO synthesis text 都说了多少遍星期一需要go work，不然没有salary用来live a better life，等到了sunday你自然会得到一天的假期。\n",
      "2024-11-20 11:07:27,027 INFO yield speech len 10.437369614512471, rtf 0.27716458657004345\n",
      "100%|██████████| 1/1 [00:02<00:00,  2.90s/it]\n"
     ]
    }
   ],
   "source": [
    "def read_txt_file(file_obj):\n",
    "    \"\"\"Read one uploaded txt file and return its stripped text ('' on failure).\"\"\"\n",
    "    if file_obj is None:\n",
    "        return \"\"\n",
    "    try:\n",
    "        # NOTE(review): with gr.File(type=\"filepath\") some Gradio versions deliver a\n",
    "        # plain str path (no .name attribute) -- confirm against the installed version.\n",
    "        with open(file_obj.name, 'r', encoding='utf-8') as f:\n",
    "            text = f.read().strip()\n",
    "        return text\n",
    "    except Exception as e:\n",
    "        gr.Warning(f\"读取文件失败: {str(e)}\")\n",
    "        return \"\"\n",
    "\n",
    "def read_multiple_txt_files(files):\n",
    "    \"\"\"Read several uploaded txt files.\n",
    "\n",
    "    Returns ([{'filename': ..., 'content': ...}, ...], gr.update) where the\n",
    "    update toggles visibility of the batch-generate button.\n",
    "    \"\"\"\n",
    "    if not files:\n",
    "        return [], gr.update(visible=False)\n",
    "    try:\n",
    "        texts = []\n",
    "        for file in files:\n",
    "            # NOTE(review): relies on each item exposing .name -- verify for this Gradio version.\n",
    "            with open(file.name, 'r', encoding='utf-8') as f:\n",
    "                text = f.read().strip()\n",
    "                texts.append({\"filename\": os.path.basename(file.name), \"content\": text})\n",
    "        return texts, gr.update(visible=True)\n",
    "    except Exception as e:\n",
    "        gr.Warning(f\"读取文件失败: {str(e)}\")\n",
    "        return [], gr.update(visible=False)\n",
    "\n",
    "# Build and launch the Gradio app.\n",
    "demo = gr.Blocks()\n",
    "with demo:\n",
    "    gr.Markdown(\"### 代码库 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) \\\n",
    "                预训练模型 [CosyVoice-300M](https://www.modelscope.cn/models/iic/CosyVoice-300M) \\\n",
    "                [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/iic/CosyVoice-300M-Instruct) \\\n",
    "                [CosyVoice-300M-SFT](https://www.modelscope.cn/models/iic/CosyVoice-300M-SFT)\")\n",
    "    gr.Markdown(\"#### 请输入需要合成的文本，选择推理模式，并按照提示步骤进行操作\")\n",
    "\n",
    "    with gr.Tabs():\n",
    "        # Tab 1: single-text synthesis\n",
    "        with gr.Tab(\"单文本处理\"):\n",
    "            with gr.Row():\n",
    "                tts_text = gr.Textbox(label=\"输入合成文本\",\n",
    "                                    lines=3,\n",
    "                                    value=\"我是通义实验室语音团队全新推出的生成式语音大模型，提供舒适自然的语音合成能力。\")\n",
    "                txt_file = gr.File(label=\"上传TXT文本文件\",\n",
    "                                file_types=[\".txt\"],\n",
    "                                type=\"filepath\")\n",
    "\n",
    "            # Generate button and audio output inside this tab\n",
    "            generate_button = gr.Button(\"生成音频\")\n",
    "            with gr.Column(visible=True):\n",
    "                audio_output = gr.Audio(\n",
    "                    label=\"合成音频\",\n",
    "                    type=\"numpy\",\n",
    "                    interactive=False,\n",
    "                    autoplay=True,\n",
    "                    show_download_button=True\n",
    "                )\n",
    "\n",
    "        # Tab 2: batch processing\n",
    "        with gr.Tab(\"批量处理\"):\n",
    "            txt_files = gr.Files(label=\"上传多个TXT文本文件\",\n",
    "                                file_types=[\".txt\"],\n",
    "                                type=\"filepath\")\n",
    "            file_contents = gr.JSON(label=\"已上传文件\", visible=False)\n",
    "            batch_generate_button = gr.Button(\"批量生成音频\", visible=False)\n",
    "\n",
    "            # One sub-tab per generated clip\n",
    "            with gr.Column(visible=True) as output_column:\n",
    "                with gr.Tabs() as audio_tabs:\n",
    "                    audio_outputs = []\n",
    "                    filename_outputs = []\n",
    "                    tabs = []  # keep Tab references so titles can be updated later\n",
    "\n",
    "                    # Create the 5 output tabs\n",
    "                    for i in range(5):\n",
    "                        with gr.Tab(f\"音频 {i+1}\") as tab:\n",
    "                            audio = gr.Audio(\n",
    "                                label=\"生成的音频\",\n",
    "                                type=\"numpy\",\n",
    "                                interactive=False,  # disable upload interaction\n",
    "                                autoplay=False,\n",
    "                                show_download_button=True\n",
    "                            )\n",
    "                            filename = gr.Text(label=\"文件名\", visible=False)\n",
    "                            audio_outputs.append(audio)\n",
    "                            filename_outputs.append(filename)\n",
    "                            tabs.append(tab)\n",
    "\n",
    "    # Shared parameter controls\n",
    "    with gr.Row():\n",
    "        mode_checkbox_group = gr.Radio(choices=inference_mode_list,\n",
    "                                     label='选择推理模式',\n",
    "                                     value=inference_mode_list[0])\n",
    "        # Fix: Gradio expects integer `scale` values; the fractional 0.5/0.25\n",
    "        # used before triggered UserWarnings (see cell output). 2:1:1 keeps the\n",
    "        # original proportions.\n",
    "        instruction_text = gr.Text(label=\"操作步骤\",\n",
    "                                 value=instruct_dict[inference_mode_list[0]],\n",
    "                                 scale=2)\n",
    "        sft_dropdown = gr.Dropdown(choices=sft_spk,\n",
    "                                 label='选择预训练音色',\n",
    "                                 value=sft_spk[0],\n",
    "                                 scale=1)\n",
    "        stream = gr.Radio(choices=stream_mode_list,\n",
    "                        label='是否流式推理',\n",
    "                        value=stream_mode_list[0][1])\n",
    "        speed = gr.Number(value=1,\n",
    "                        label=\"速度调节(仅支持非流式推理)\",\n",
    "                        minimum=0.5,\n",
    "                        maximum=2.0,\n",
    "                        step=0.1)\n",
    "        with gr.Column(scale=1):\n",
    "            seed_button = gr.Button(value=\"\\U0001F3B2\")\n",
    "            seed = gr.Number(value=0, label=\"随机推理种子\")\n",
    "\n",
    "    with gr.Row():\n",
    "        prompt_wav_upload = gr.Audio(sources='upload',\n",
    "                                   type='filepath',\n",
    "                                   label='选择prompt音频文件，注意采样率不低于16khz')\n",
    "        prompt_wav_record = gr.Audio(sources='microphone',\n",
    "                                   type='filepath',\n",
    "                                   label='录制prompt音频文件')\n",
    "\n",
    "    prompt_text = gr.Textbox(label=\"输入prompt文本\",\n",
    "                            lines=1,\n",
    "                            placeholder=\"请输入prompt文本，需与prompt音频内容一致，暂时不支持自动识别...\",\n",
    "                            value='')\n",
    "\n",
    "    instruct_text = gr.Textbox(label=\"输入instruct文本\",\n",
    "                              lines=1,\n",
    "                              placeholder=\"请输入instruct文本.\",\n",
    "                              value='')\n",
    "\n",
    "    # Event wiring\n",
    "    txt_file.change(fn=read_txt_file,\n",
    "                   inputs=[txt_file],\n",
    "                   outputs=[tts_text])\n",
    "\n",
    "    txt_files.change(\n",
    "        fn=read_multiple_txt_files,\n",
    "        inputs=[txt_files],\n",
    "        outputs=[file_contents, batch_generate_button]\n",
    "    )\n",
    "\n",
    "    batch_generate_button.click(\n",
    "        fn=batch_generate_audio,\n",
    "        inputs=[file_contents, mode_checkbox_group, sft_dropdown, prompt_text,\n",
    "                prompt_wav_upload, prompt_wav_record, instruct_text, seed, stream, speed],\n",
    "        outputs=audio_outputs + filename_outputs\n",
    "    ).then(  # then refresh the tab titles with the source filenames\n",
    "        fn=update_tab_titles,\n",
    "        inputs=audio_outputs + filename_outputs,\n",
    "        outputs=tabs  # Tab references saved above\n",
    "    )\n",
    "\n",
    "    seed_button.click(generate_seed,\n",
    "                     inputs=[],\n",
    "                     outputs=seed)\n",
    "\n",
    "    generate_button.click(generate_audio,\n",
    "                        inputs=[tts_text, mode_checkbox_group, sft_dropdown,\n",
    "                               prompt_text, prompt_wav_upload, prompt_wav_record,\n",
    "                               instruct_text, seed, stream, speed],\n",
    "                        outputs=[audio_output])\n",
    "\n",
    "    mode_checkbox_group.change(fn=change_instruction,\n",
    "                             inputs=[mode_checkbox_group],\n",
    "                             outputs=[instruction_text])\n",
    "\n",
    "demo.queue(max_size=4, default_concurrency_limit=2)\n",
    "demo.launch(server_name='127.0.0.1', server_port=8000, prevent_thread_lock=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stop the Gradio server and release the port.\n",
    "demo.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "def clear_port(port=8000):\n",
    "    \"\"\"Kill whatever process is listening on `port` (Unix only; requires lsof).\"\"\"\n",
    "    try:\n",
    "        # os.system does not raise when the command fails, so inspect the exit\n",
    "        # status instead of relying on the except clause (the old bare except\n",
    "        # reported success even when nothing was killed).\n",
    "        status = os.system(f\"kill -9 $(lsof -t -i:{port})\")\n",
    "        if status == 0:\n",
    "            print(f\"端口 {port} 已清理\")\n",
    "        else:\n",
    "            print(\"清理端口失败，请手动重启内核\")\n",
    "    except Exception:\n",
    "        print(\"清理端口失败，请手动重启内核\")\n",
    "\n",
    "clear_port(8000)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "cosyvoice_env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
