{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "collapsed_sections": [ "3jNjgQ0JlCCL" ] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "source": [ "import gradio as gr\n", "import torch\n", "print(gr.__version__)\n", "print(torch.__version__)" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "W70gW3rnp1QP", "outputId": "60be638b-06cd-4b33-e63d-6e5dc5237ca8" }, "execution_count": 2, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "3.23.0\n", "1.13.1\n" ] } ] }, { "cell_type": "code", "source": [ "from pathlib import Path\n", "\n", "MODEL_PATH = Path(\"models\") \n", "MODEL_PATH.mkdir(parents=True, exist_ok=True)" ], "metadata": { "id": "X-8HUWxxcVHr" }, "execution_count": 3, "outputs": [] }, { "cell_type": "code", "source": [ "import json\n", "from PIL import Image\n", "import numpy as np\n", "import os\n", "\n", "with open(\"models/model_info.json\", \"r\", encoding=\"utf-8\") as f:\n", " models_info = json.load(f)\n", "\n", "\n", "\n", "LANGUAGES = ['EN','CN','JP']\n", "speaker_id = 0\n", "\n", "\n" ], "metadata": { "id": "VGacf2W4AjYm" }, "execution_count": 37, "outputs": [] }, { "cell_type": "code", "execution_count": 20, "outputs": [], "source": [ "class CustomEncoder(json.JSONEncoder):\n", " def default(self, obj):\n", " if isinstance(obj, Path):\n", " return str(obj)\n", " return super().default(obj)\n" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "source": [ "def add_model_fn(example_text, cover, SpeakerID, name_en, name_cn, language):\n", "\n", "\n", "\n", " # 检查必填字段是否为空\n", " if not speaker_id or not name_en or not language:\n", " raise gr.Error(\"Please fill in all required fields!\")\n", " return \"Failed to add model\"\n", "\n", "\n", "\n", "\n", " ### 保存上传的文件\n", "\n", " # 生成文件路径\n", " model_save_dir = Path(\"models\")\n", " model_save_dir = model_save_dir / name_en\n", " img_save_dir = model_save_dir\n", " model_save_dir.mkdir(parents=True, exist_ok=True)\n", "\n", " #shutil.copyfile(file.value,Model_save_path)\n", "\n", " # file_data = file.data[0]\n", " # filename = secure_filename(file_data.name)\n", " # filepath = os.path.join(\"models\", name_en, filename)\n", " # os.makedirs(os.path.dirname(filepath), exist_ok=True)\n", "\n", "\n", " # 保存checkpoints 和 cover\n", " #tensor = torch.FloatTensor(file)\n", " Model_name = name_en + \".pth\"\n", " model_save_dir = model_save_dir / Model_name\n", " #torch.save(tensor, Model_save_path)\n", "\n", " #\n", " # # convert to RGB format if necessary\n", " # if len(img.shape) == 2 or img.shape[2] == 1:\n", " # img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGBA)\n", " # else:\n", " # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)\n", " #\n", " # cv2.imwrite(str(img_save_dir / \"cover.png\"), img)\n", "\n", " if cover is not None:\n", " img = np.array(cover)\n", " img = Image.fromarray(img)\n", " img.save(os.path.join(img_save_dir, 'cover.png'))\n", "\n", "\n", " \n", " #获取用户输入\n", " new_model = {\n", " \"name_en\": name_en,\n", " \"name_zh\": name_cn,\n", " \"cover\": img_save_dir / \"cover.png\",\n", " \"sid\": SpeakerID,\n", " \"example\": \"それに新しいお菓子屋さんも出来てみんな買いものを楽しんでいます!\",\n", " \"language\": language,\n", " \"type\": \"single\",\n", " \"model_path\": model_save_dir\n", " }\n", "\n", "\n", "\n", " with open(\"models/model_info.json\", \"r\", encoding=\"utf-8\") as f:\n", " models_info = json.load(f)\n", "\n", " models_info[name_en] = new_model\n", " 
with open(\"models/model_info.json\", \"w\") as f:\n", " json.dump(models_info, f, cls=CustomEncoder)\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " #return file.\n", " return \"Success\"\n" ], "metadata": { "id": "3dynM_kkBytx" }, "execution_count": 105, "outputs": [] }, { "cell_type": "code", "execution_count": 102, "outputs": [], "source": [ "def clear_add_model_info():\n", " return \"\",None,\"\",\"\",\"\",\"\"" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 104, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7880\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/plain": "", "text/html": "
" }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Keyboard interruption in main thread... closing server.\n" ] }, { "data": { "text/plain": "" }, "execution_count": 104, "metadata": {}, "output_type": "execute_result" } ], "source": [ "theme = gr.themes.Base()\n", "\n", "with gr.Blocks(theme=theme) as interface:\n", " with gr.Tab(\"Settings\"):\n", " with gr.Box():\n", " gr.Markdown(\"\"\"# Select Model\"\"\")\n", " with gr.Row():\n", "\n", " with gr.Column(scale = 5):\n", " model_choice = gr.Dropdown(label = \"Model\",\n", " choices=[(model[\"name_en\"]) for name, model in models_info.items()],\n", " interactive=True,\n", " value=models_info['yuuka']['name_en']\n", " )\n", " with gr.Column(scale = 5):\n", " speaker_id = gr.Dropdown(label = \"Speaker ID\",\n", " choices=[(str(model[\"sid\"])) for name, model in models_info.items()],\n", " interactive=True,\n", " value=str(models_info['yuuka']['sid'])\n", " )\n", "\n", " with gr.Column(scale = 1):\n", " refresh_button = gr.Button(\"Refresh\", variant=\"primary\")\n", " reset_button = gr.Button(\"Reset\")\n", "\n", " with gr.Box():\n", " gr.Markdown(\"# Add Model\\n\"\n", " \"> *为必填选项\\n\"\n", " \"> 添加完成后将**checkpoints**文件放到对应生成的文件夹中\"\n", " )\n", "\n", "\n", " with gr.Row():\n", " # file = gr.Files(label = \"VITS Model*\", file_types=[\".pth\"])\n", " example_text = gr.Textbox(label = \"Example Text\",\n", " lines=16,\n", " placeholder=\"Enter the example text here\",)\n", " model_cover = gr.Image(label = \"Cover\")\n", "\n", " with gr.Column():\n", " model_speaker_id = gr.Textbox(label = \"Speaker List*\",\n", " placeholder=\"Single speaker model default=0\")\n", " model_name_en = gr.Textbox(label = \"name_en*\")\n", " model_name_cn = gr.Textbox(label = \"name_cn\")\n", " model_language = gr.Dropdown(label = \"Language*\",\n", " choices=LANGUAGES,\n", " interactive=True)\n", " with gr.Row():\n", " add_model_button = gr.Button(\"Add Model\", variant=\"primary\")\n", " clear_add_model_button = gr.Button(\"Clear\")\n", " with gr.Box():\n", " with gr.Row():\n", " message_box = gr.Textbox(label = \"Message\")\n", "\n", "\n", "\n", " add_model_button.click(add_model_fn,\n", " inputs = [example_text, model_cover, model_speaker_id, model_name_en, model_name_cn, model_language],\n", " outputs = message_box\n", " )\n", " clear_add_model_button.click(clear_add_model_info,\n", " outputs = [example_text, model_cover, model_speaker_id, model_name_en, model_name_cn, model_language]\n", " )\n", "\n", "interface.queue(concurrency_count=1).launch(debug=True)" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 62, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7866\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/plain": "", "text/html": "
" }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ "Traceback (most recent call last):\n", " File \"D:\\Anaconda\\envs\\ML\\lib\\site-packages\\gradio\\routes.py\", line 394, in run_predict\n", " output = await app.get_blocks().process_api(\n", " File \"D:\\Anaconda\\envs\\ML\\lib\\site-packages\\gradio\\blocks.py\", line 1075, in process_api\n", " result = await self.call_function(\n", " File \"D:\\Anaconda\\envs\\ML\\lib\\site-packages\\gradio\\blocks.py\", line 884, in call_function\n", " prediction = await anyio.to_thread.run_sync(\n", " File \"D:\\Anaconda\\envs\\ML\\lib\\site-packages\\anyio\\to_thread.py\", line 31, in run_sync\n", " return await get_asynclib().run_sync_in_worker_thread(\n", " File \"D:\\Anaconda\\envs\\ML\\lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 937, in run_sync_in_worker_thread\n", " return await future\n", " File \"D:\\Anaconda\\envs\\ML\\lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 867, in run\n", " result = context.run(func, *args)\n", " File \"C:\\Users\\l4227\\AppData\\Local\\Temp\\ipykernel_11412\\3513495185.py\", line 4, in file_upload\n", " return file.name\n", "AttributeError: 'list' object has no attribute 'name'\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Keyboard interruption in main thread... closing server.\n" ] }, { "data": { "text/plain": "" }, "execution_count": 62, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import gradio as gr\n", "\n", "def file_upload(file):\n", " return file.name\n", "\n", "\n", "with gr.Blocks() as interface:\n", "\n", " a = gr.Files(label = \"VITS Model*\", file_types=[\".pth\"])\n", " b = gr.Files(label = \"Cover\", file_types=[\".png\"])\n", " c = gr.Button()\n", " d = gr.Textbox()\n", "\n", " c.click(fn=file_upload,inputs=a,outputs=d)\n", "\n", "\n", "interface.queue(concurrency_count=1).launch(debug=True)" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 66, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Running on local URL: http://127.0.0.1:7867\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/plain": "", "text/html": "
" }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": "" }, "execution_count": 66, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import gradio as gr\n", "\n", "def file_upload(file):\n", " return file.name\n", "\n", "iface = gr.Interface(fn=file_upload, inputs=\"file\", outputs=\"text\")\n", "iface.launch()" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": 65, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "D:\\Anaconda\\envs\\ML\\lib\\site-packages\\gradio\\deprecation.py:40: UserWarning: `optional` parameter is deprecated, and it has no effect\n", " warnings.warn(value)\n", "D:\\Anaconda\\envs\\ML\\lib\\site-packages\\gradio\\deprecation.py:40: UserWarning: `keep_filename` parameter is deprecated, and it has no effect\n", " warnings.warn(value)\n" ] }, { "ename": "AttributeError", "evalue": "module 'gradio.outputs' has no attribute 'Text'", "output_type": "error", "traceback": [ "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m", "\u001B[1;31mAttributeError\u001B[0m Traceback (most recent call last)", "Cell \u001B[1;32mIn[65], line 8\u001B[0m\n\u001B[0;32m 5\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m/path/to/output/file.txt\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m 7\u001B[0m input_file \u001B[38;5;241m=\u001B[39m gr\u001B[38;5;241m.\u001B[39minputs\u001B[38;5;241m.\u001B[39mFile()\n\u001B[1;32m----> 8\u001B[0m output_file_path \u001B[38;5;241m=\u001B[39m \u001B[43mgr\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43moutputs\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mText\u001B[49m(\u001B[38;5;28mtype\u001B[39m\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mfile\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m 10\u001B[0m gr\u001B[38;5;241m.\u001B[39mInterface(get_file_path, inputs\u001B[38;5;241m=\u001B[39minput_file, outputs\u001B[38;5;241m=\u001B[39moutput_file_path)\u001B[38;5;241m.\u001B[39mlaunch()\n", "\u001B[1;31mAttributeError\u001B[0m: module 'gradio.outputs' has no attribute 'Text'" ] } ], "source": [ "import gradio as gr\n", "\n", "def get_file_path(input_file):\n", " # do something with input file\n", " return \"/path/to/output/file.txt\"\n", "\n", "input_file = gr.inputs.File()\n", "output_file_path = gr.outputs.Text(type=\"file\")\n", "\n", "gr.Interface(get_file_path, inputs=input_file, outputs=output_file_path).launch()\n" ], "metadata": { "collapsed": false } }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [], "metadata": { "collapsed": false } } ] }