{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" } }, "cells": [ { "cell_type": "code", "source": [ "!pip install --upgrade typing-extensions" ], "metadata": { "id": "-b_6DpRU-uqm" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "## **下載whisper套件**" ], "metadata": { "id": "zAi-ZzbbZrjL" } }, { "cell_type": "code", "source": [ "!pip install gradio" ], "metadata": { "id": "fBeLbJPAaire" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "!pip install -U openai-whisper" ], "metadata": { "id": "BdhO4TAacxw3", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "fe28e610-183a-4f34-9074-833a22c4cbbe" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Collecting openai-whisper\n", " Downloading openai-whisper-20231106.tar.gz (798 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m798.6/798.6 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", "Collecting triton==2.0.0 (from openai-whisper)\n", " Downloading triton-2.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (63.3 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m63.3/63.3 MB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: numba in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (0.58.1)\n", "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (1.23.5)\n", "Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (2.1.0+cu118)\n", "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (4.66.1)\n", "Requirement already satisfied: more-itertools in /usr/local/lib/python3.10/dist-packages (from openai-whisper) (10.1.0)\n", "Collecting tiktoken (from openai-whisper)\n", " Downloading tiktoken-0.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.0 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m66.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->openai-whisper) (3.27.7)\n", "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->openai-whisper) (3.13.1)\n", "Collecting lit (from triton==2.0.0->openai-whisper)\n", " Downloading lit-17.0.5.tar.gz (153 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m153.0/153.0 kB\u001b[0m \u001b[31m17.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", " Installing backend dependencies ... \u001b[?25l\u001b[?25hdone\n", " Preparing metadata (pyproject.toml) ... 
{ "cell_type": "markdown", "source": [ "# **Update the package to the latest version**" ], "metadata": { "id": "rh2gr_ySZndV" } },
{ "cell_type": "code", "source": [ "!pip install --upgrade --no-deps --force-reinstall git+https://github.com/openai/whisper.git" ], "metadata": { "id": "oZTj2_eVcyWA" }, "execution_count": null, "outputs": [] },
{ "cell_type": "markdown", "source": [ "# **The following command uses the medium model to transcribe speech from the audio files**\n" ], "metadata": { "id": "PgaD3q4OZcfr" } },
{ "cell_type": "code", "source": [ "!whisper audio.flac audio.mp3 audio.wav --model medium" ], "metadata": { "id": "BkRtgTCodyH-" }, "execution_count": null, "outputs": [] },
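{ "cell_type": "markdown", "source": [ "The same transcription can be run from Python instead of the CLI. A minimal sketch, assuming an `audio.mp3` file has already been uploaded to the working directory; the variable is named `model_medium` here only to avoid clashing with the large model loaded further down." ], "metadata": {} },
{ "cell_type": "code", "source": [ "import whisper\n", "\n", "# Load the same checkpoint the CLI call above uses.\n", "model_medium = whisper.load_model(\"medium\")\n", "\n", "# transcribe() handles loading, chunking and decoding the whole file.\n", "result = model_medium.transcribe(\"audio.mp3\")\n", "print(result[\"text\"])" ], "metadata": {}, "execution_count": null, "outputs": [] },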
{ "cell_type": "markdown", "source": [ "# **Transcribe Chinese audio (specify the language)**" ], "metadata": { "id": "Qtn89HW-Z5-y" } },
{ "cell_type": "code", "source": [ "!whisper chinese.wav --language Chinese" ], "metadata": { "id": "mpgdHsXGo7BZ" }, "execution_count": null, "outputs": [] },
{ "cell_type": "markdown", "source": [ "# **The speech can also be translated into English**" ], "metadata": { "id": "LKlGrnh2a5jx" } },
{ "cell_type": "code", "source": [ "!whisper chinese.wav --language Chinese --task translate" ], "metadata": { "id": "ZWV92M68atLr" }, "execution_count": null, "outputs": [] },
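{ "cell_type": "markdown", "source": [ "Equivalent Python call for the translate task: a sketch assuming the same `chinese.wav` file and the `model_medium` object loaded in the sketch above. The translate task makes Whisper emit English text regardless of the spoken language." ], "metadata": {} },
{ "cell_type": "code", "source": [ "# \"zh\" is the language code behind the CLI's --language Chinese option.\n", "translation = model_medium.transcribe(\"chinese.wav\", language=\"zh\", task=\"translate\")\n", "print(translation[\"text\"])" ], "metadata": {}, "execution_count": null, "outputs": [] },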
{ "cell_type": "markdown", "source": [ "# **Install the translation API package (openai)**" ], "metadata": { "id": "lLMe8B9aZiim" } },
{ "cell_type": "code", "source": [ "!pip install openai" ], "metadata": { "id": "bADHSOBhvV04" }, "execution_count": null, "outputs": [] },
{ "cell_type": "markdown", "source": [ "# **Code for importing audio files**" ], "metadata": { "id": "KtJj-H2vZpzf" } },
{ "cell_type": "code", "source": [ "!pip install gradio_client" ], "metadata": { "id": "qeHuyE41cmIl" }, "execution_count": null, "outputs": [] },
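{ "cell_type": "markdown", "source": [ "A sketch of how `gradio_client` could call the hosted Whisper Space instead of running the model locally. The Space name matches the repository cloned below; the endpoint names and argument layout depend on how that Space is defined, so they are inspected with `view_api()` rather than assumed." ], "metadata": {} },
{ "cell_type": "code", "source": [ "from gradio_client import Client\n", "\n", "# Connect to the hosted demo Space (assumes the Space is public and running).\n", "client = Client(\"openai/whisper\")\n", "\n", "# List the available endpoints before calling client.predict(...).\n", "client.view_api()" ], "metadata": {}, "execution_count": null, "outputs": [] },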
{ "cell_type": "markdown", "source": [ "# The correct version\n" ], "metadata": { "id": "ijGRue6X4Yuk" } },
{ "cell_type": "code", "source": [ "!git clone https://huggingface.co/spaces/openai/whisper\n", "%cd whisper\n", "!pip install -r requirements.txt" ], "metadata": { "id": "zGnrVKblRfXu" }, "execution_count": null, "outputs": [] },
{ "cell_type": "code", "source": [ "import os\n", "os.system(\"pip install git+https://github.com/openai/whisper.git\")\n", "import gradio as gr\n", "import whisper\n", "\n",
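"# Notes on this cell (added for clarity): load_model(\"large\") downloads roughly 3 GB of weights on first use;\n",
"# pad_or_trim() fits the audio into the 30-second window Whisper decodes at a time;\n",
"# log_mel_spectrogram() builds the model input and detect_language() guesses the spoken language;\n",
"# fp16=False keeps decoding in float32 so the demo also works on a CPU-only runtime.\n",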
"model = whisper.load_model(\"large\")\n", "\n", "\n", "\n", "def inference(audio):\n", " audio = whisper.load_audio(audio)\n", " audio = whisper.pad_or_trim(audio)\n", "\n", " mel = whisper.log_mel_spectrogram(audio).to(model.device)\n", "\n", " _, probs = model.detect_language(mel)\n", "\n", " options = whisper.DecodingOptions(fp16 = False)\n", " result = whisper.decode(model, mel, options)\n", "\n", " print(result.text)\n", " return result.text, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)\n", "\n", "iface = gr.Interface(\n", " fn=inference,\n", " inputs=gr.Audio(type=\"filepath\", label=\"上传音频文件 (.mp3, .wav等)\"),\n", " outputs=\"text\"\n", ")\n", "\n", "# 启动 Gradio 界面\n", "iface.launch()\n" ], "metadata": { "id": "XNHE7YBAQtQd" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "pip install transformers\n" ], "metadata": { "id": "SQsjMmicSV0m" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "Chinese_to_english = model.transcribe(audio,task='translate')\n", "print(Chinese_to_english[\"text\"])" ], "metadata": { "id": "5i1LGotLbqip", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "111369a7-bd4e-4843-9737-a39c468012ec" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ " If you find it very complicated, then you must not worry about it. Don't just go into the mind to think that it is complicated, but in fact, it is even more complicated than you think. If we are not able to understand the meaning of the words of the words today, then we will not understand it yesterday. If you have found out that the meaning of the words of the words of the words of the four words, please let me know. Hello everyone, I am 18 years old today.\n" ] } ] }, { "cell_type": "markdown", "source": [ "# **輸入YouTube網址來實現語音轉文字中英文皆可**" ], "metadata": { "id": "3lUCRSkhZ9kQ" } }, { "cell_type": "code", "source": [ "import whisper\n", "import torch\n", "import os" ], "metadata": { "id": "owz-tvQioMvL" }, "execution_count": 13, "outputs": [] }, { "cell_type": "code", "source": [ "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", "device" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 35 }, "id": "kK_fJz-CRv6O", "outputId": "5f80f175-9de8-4035-9d67-4cc684859f41" }, "execution_count": 14, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "'cuda'" ], "application/vnd.google.colaboratory.intrinsic+json": { "type": "string" } }, "metadata": {}, "execution_count": 14 } ] }, { "cell_type": "code", "source": [ "whisper_model = whisper.load_model(\"large\", device=device)" ], "metadata": { "id": "POMpkyjQR9IN" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "!pip -qqq install pytube" ], "metadata": { "id": "H5lr_kCtSJyF", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "975e6a4b-bc3b-45c9-c897-5ae981815da1" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/57.6 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━\u001b[0m \u001b[32m41.0/57.6 kB\u001b[0m \u001b[31m1.2 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m57.6/57.6 kB\u001b[0m \u001b[31m1.0 
{ "cell_type": "code", "source": [ "from pytube import YouTube\n", "\n", "def video_to_audio(video_URL, destination, final_filename):\n", "    # Download only the audio stream of the YouTube video.\n", "    video = YouTube(video_URL)\n", "    audio = video.streams.filter(only_audio=True).first()\n", "    output = audio.download(output_path=destination)\n", "\n", "    # Rename the downloaded file to .mp3; this only changes the extension,\n", "    # it does not re-encode the audio (Whisper/ffmpeg can read it either way).\n", "    _, ext = os.path.splitext(output)\n", "    new_file = final_filename + '.mp3'\n", "    os.rename(output, new_file)" ], "metadata": { "id": "jZql4_M5SSMq" }, "execution_count": null, "outputs": [] },
{ "cell_type": "code", "source": [ "video_URL = 'https://www.youtube.com/watch?v=s0RB1X1YYdU'  # YouTube URL\n", "destination = \".\"\n", "final_filename = \"motivational_speech\"  # save the video's audio track as an mp3\n", "video_to_audio(video_URL, destination, final_filename)" ], "metadata": { "id": "jN10OsquTtEI" }, "execution_count": null, "outputs": [] },
{ "cell_type": "code", "source": [ "audio_file = \"motivational_speech.mp3\"  # load the mp3 file and transcribe the speech to text\n", "result = whisper_model.transcribe(audio_file)" ], "metadata": { "id": "EhcNzSZFUkgF" }, "execution_count": null, "outputs": [] },
{ "cell_type": "code", "source": [ "print(result[\"text\"])" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "yiWLgZzIVwJD", "outputId": "6091e9a1-bf48-47ed-fe18-e84891ca2123" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "今天是我們在倫敦市區最後一天然後我們接下來就要去倫敦以外的探索那我們因為這樣子租了一台車然後是跑車跑車嗎?不是跑車啦SUV啦那我不開我只開跑車我哪有跟你說跑車SUV因為我們要放五個人的行李對我們是後車廂很大的跑車OK好好好那我們這個都已經先預約好了然後我是選Audi Q7該不會就是那一台吧我個人是蠻喜歡的我剛剛就是看了很多之後我覺得最適合我們因為它後面整個那個後車廂都可以蓋下來這個嗎?對啊那可能預算要三倍好我們來拿車了這個是我們的職業國際駕駛員這是你的卡但是你手上的這是我的卡車子的資料會在他的名字裡面OK我們可以換嗎?我們現在要做的是我們要付款給那台車我們要付款給你OK然後付款給他的車OK所以我的已經付款了你付款了但是你還沒有付款OK所以我們還沒有完成交易明白所以我們現在要付款然後付款給你的車OK明白不知道為什麼新加坡卡這麼沒用這個申請的駕駛人的名字新加坡卡要跟駕駛人名字一樣我好奇我看到Royce在那邊停車一天要租多少錢?我們沒有租車租金最少要租兩天一天要租15000元一天要租15000元一天租15000元喔對哇Peter說這邊是厲害的租車廠對啊這裡都是厲害的車子阿滴說他想要租這台Royce Nice我們剛剛有問一天租6萬塊然後押金60萬對啊你看我們這樣子分下來一個人大概一萬出頭還可以吧還可以我們開在路上一堆女生就直接來大G渣男開大G這就是我們的渣男欸開不起來有開得起來渣男開大G關起來上堂的聲音有這是Joeman教我的Joeman這個渣男看這個跟大家介紹一下我的車賓利哇欸賓利系列大賓利系列欸這個這個怎麼樣寶馬欸這邊真的是奢華區欸對啊來跟大家介紹一下我的車Aston Martin你們的車真的很好這不是全部的對啊我們這裡沒有Ferrari和Lamborghini我們直接偷偷上去那台車對啊我們偷偷上去我們開著沒有人來對太巧了吧欸你看這個渣男視野Yo他們真的變好矮喔這整個世界都變矮了一截你看你就是適合開這種車啊Peter哥哥以你的肌肉量這個就是我的車欸這就是你的車欸超級你的車這應該叫大P它不應該叫大G大P它還開機了現在的狀況是我之前租車都是用我的信用卡我的名字但是是用哥哥的國際駕照所以駕駛人是他但他後來喬喬之後說不可以這樣就是一定要由駕駛人支付跟信用卡所以可能Maybe他會覺得如果是駕駛人出什麼包然後如果我逃跑的話他就沒有任何人可以索賠Maybe是這樣我要上那台賓利了他剛有跟我們說我們可以就是去摸摸啊去看看這些車子摸摸有包括開開嗎應該可以吧我們兩個在我們兩個在這邊大delay整個團體的進度這台是給你開啦不要說對你不好這台給我開喔這台給你開這台是什麼賓利哇哇它這個經典的這個很漂亮的儀表板還有它的這個Clock哇坐起來舒服你看一整個面這樣子簡約設計這樣過去怪色的我要趕快出去剛剛有講嗎我租的是Audi QQ8還是Q7系列就是它後面可以壓下來所以我們五個人的行李都可以放得上去好的我們要上車了這就是我們的Q這是Q7還是Q8這是Q7還是Q8Q8你看它這個後車廂很大它這邊其實是兩個椅子可以拉起來它這個按鈕在這邊就是可以多兩個椅子但是我們把它都往下所以這邊後面可以放大家的行李箱好的租到車了現在在收集它我們要去接大家對它跟Google 
Maps一樣的這台車真的是滿舒適的非常多空間好那我們待會就是回去民宿接大家之後一路要開到牛津要花一個多小時一個半小時左右的車程然後我們在牛津會有一系列的拍攝結束再去到我們這趟最後的一個點是一個在比較西邊的16世紀的城堡最後的兩天會住在城堡怪色掌聲給Peter哥哥再接一次開車謝謝Peter哥哥這趟幫我們開車我們現在要還車的地方了Peter覺得這次自駕的英國自駕的感想如何我覺得開車比台灣舒服很多但這邊的路比較小對但是整個大家的那個擠壓感比較沒有那麼嚴重因為台灣我大部分開的時候真的機車真的太多它會是有點危險的感覺然後車子也比較不會那麼禮讓對我們在英國好像沒有被巴過對沒有被巴過所以我覺得還蠻舒服的那雖然有的時候會有點小但是我覺得還是比較舒服跟台灣比起來那左駕呢對你來說因為上次去沖繩已經駕過一次了然後已經有練習過了所以這次已經非常輕易的上手了了解我們這次是跟Hertz租車然後我們租的車型是Audi的Q8它就是後面比較大其實我一開始一直在研究的時候我就因為知道有五個人然後五個行李箱所以我就已經找了一個我看起來覺得最大的SUV了因為再往上搭就是那種小卡車就會更貴結果我們的位置還是不太夠對就是後來大謙要抱著自己的行李箱在前面我們才拆了一下但是還算可以啦就是後面整個這樣拆開來才是蠻大的後面蠻大屁股蠻大的我喜歡然後呢我們這樣子租車三天下來一共是4萬2台幣What4萬2對我們會用四個人拆因為Peter哥哥幫忙駕駛所以哥哥跟Alicia算一組沒關係吧所以我們會四個人拆這樣子的話一個人是一萬塊左右等於是三天但是我覺得還是蠻划算的因為你講喔我們第一天等於是我們要我們直接從市區就直接取車然後去到我們要去到的就是比較旁邊的Bristol然後回來的路上我們也可以去到很多可能沒有車子不能去的景點像巨石鎮羅馬浴場所以我覺得整體來說還不錯其實我現在認真覺得什麼東西跟那個馬車比較起來都是很不合適的對所以我剛剛聽到雖然有嚇到但是喔好像在坐馬車對對對這時候大家應該還不知道坐馬車是什麼大家繼續往下看繼續往下看就知道馬車是什麼奇怪的盤子形成了好所以這次是我們的體驗所以蠻推薦大家如果可以就是在倫敦之外想要玩一些其他的城鎮的話可以在倫敦市區租車然後呢像我們一樣最後一天回來機場換車像這樣我們也不用再多一趟Uber的錢因為去那邊就要100塊對對對對來回這樣就200所以少100也算省錢對蠻好的好以上給大家參考好讚那接下來還會繼續阿滴日常我只是今天是我們最後一天了但是只是我把這個最後變成片段我想說租車當作是一集完整的所以接下來還會再日更好幾天這樣子會是繼續帶大家看說我們搭著車子都是去了哪裡玩這樣好拜拜\n" ] } ] }, { "cell_type": "code", "source": [ "Chinese_to_english = whisper_model.transcribe(audio_file,task='translate')" ], "metadata": { "id": "koyK5ySbW-a6" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "print(Chinese_to_english[\"text\"]) #中文轉英文" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "H_jql0n2a4P9", "outputId": "d3d7db2d-5fb8-4ca9-f04f-24dd91d4af39" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ " Today is our last day in London We're going to explore London We rented a car A sports car A sports car? Not a sports car, an SUV I'm not driving I only drive a sports car I didn't say SUV SUV, because we're putting five people in We're... We're a sports car with a big trunk Oh, okay Okay, we've already made a reservation I'm choosing the Audi Q7 Is it that one? I... I... I personally quite like it I just... After seeing a lot I think it's the most suitable for us Because the whole rear compartment Can be covered This one? This one? That might cost three times the budget Okay, let's get the car This is our... Professional international driver Your card It's my card My card The card unfortunately would have to be in his name Okay Can we change... So what we've done is We've obviously charged the rental to that car Which we're going to have to refund to you Okay And charge everything on his car Okay So mine's already charged We charged the rental But you declined our deposit anyways Okay So we couldn't complete the transactions on there So what we'll do is We'll refund the rental cost And charge him the full rental and deposit on his Okay I don't know why the credit card Can only be used To apply for the driver's name The credit card has to be the same as the driver's name Peter said this is an amazing rental This is an amazing rental This is an amazing rental A-D said he wants to rent this car We just asked This car 60,000 NTD per day 60,000 NTD per day And 600,000 NTD deposit Yeah Look at us Split up like this Each of us is about 10,000 NTD Not bad right? 
Not bad We'll just drive on the road And a bunch of girls will come Big G Big G for the scumbags This is our scumbag Can't open it Yeah, I can open it Big G for the scumbags Close it The sound of the candy This is what Joven taught me Joven the scumbag Look at this Let me introduce you to my car Bentley Wow Bentley series Big Bentley series Big Bentley series This one How is it? Is it nice? This is really a luxury area Yeah Let me introduce you to my car My car Aston Martin You guys have really nice cars That's not even the full piece Yeah? We don't have the Ferrari From the Lamborghini We'll just sneak up on that car Sneak up on it We'll drive and no one will come What a coincidence Look at this scumbag's view Yo They really got so short The whole world Got a bit shorter Look You're just suitable for this kind of car Peter With your muscles This is my car This is your car Super Li's car This should be called Big P Not Big G Big P He's even driving Wow The current situation is I used my credit card and my name to rent a car But it's with my brother's international driver's license So he's the driver But he later After the bridge Said no Must be paid by the driver With credit card So maybe he thinks If it's the driver What bag And if I run away He has no one to pay Maybe it's like this I'm going to get on that Bentley He just told us We can just go to MoMo Go see these cars Does MoMo include KaiKai? Should be ok We two are here Big delay The progress of the whole group This one is for you Don't say it's bad for you This one is for me This one is for you What is this one Bentley Wow Wow, this classic one Very beautiful The dashboard And his Clock Comfortable to sit Look at the whole face Simple design Go over I'm starving I have to get out of here I have to get out of here I just said I rent Audi Q Q8 or Q7 series It can be pressed down later So we can put our five luggage on it Ok, we're getting on This is our Q Is it Q7 or Q8 Sorry? Is this a Q7 or Q8 Q8 Q8, ok Look at this trunk It's actually two chairs here Can be pulled up The button is here Two more chairs But we put it all down So you can put your luggage in the back Alright Rent a car Now we are getting familiar with it We are going to pick up everyone Uh, yes Yes, it's the same as Google Maps This car is really comfortable A lot of space Then we will be back to the B&B After picking everyone up Drive all the way to Niujin It takes more than an hour About an hour and a half Then we will have a series of shooting in Niujin End Go to our last point Is a castle in the western part of the 16th century The last two days will be in the castle Almost died Applause to Peter I have been driving all the time Thank you Peter for driving for us We are going to the place where we return the car Peter thinks this time How about the British self-driving? I think driving is much more comfortable than Taiwan But the road here is smaller Yes, but the whole Everyone's The sense of compression is not so serious Because most of the time I drive in Taiwan There are too many cars Oh yes yes A bit dangerous feeling Then the car will not be so forgiving Yes, we seem to have not been pulled out in the UK Yes, not pulled out So I feel pretty comfortable Although sometimes it will be a bit small But I think it's more comfortable Compared with Taiwan What about driving? 
For you Because I went to Chongsheng last time Already married once I have already practiced So this time I have already got on very easily I understand We rented a car with Hertz last time Then we rented the car model is Audi Q8 It's just bigger in the back Actually, when I was studying at the beginning I just know there are five people Then five suitcases So I have found one I think the biggest SUV Because it's the kind of small truck that goes up Will be more expensive As a result, our position is still not enough It was later that Daqian wanted to hold his own suitcase We just took it off in front But it's okay The whole thing is quite big The back is quite big The ass is quite big I am used to it Then we rent a car like this for three days A total of 420,000 Taiwan dollars What 420,000 420,000 We will use four people to guess Because Peter's brother helped drive So brother and Alisha are a group It's okay So we will guess four people In this case, one person is about 10,000 yuan Equals three days But I think it's still quite cost-effective Because you think it's cost-effective You say our first day It's like we want We directly take a car from the city Then go to the one we want to go to Just next to Bristol Then on the way back We can also go to many Maybe there are no places where cars can't go Like Juizhen, Rome, Yuchang So I think it's pretty good overall Actually, I seriously feel What is compared to that horse car Are all the same So I just heard Although there is a scare But it seems to be a horse car Yes yes yes At this time, everyone should still I don't know what a horse car is Everyone continues to look down Just know what a horse car is Strange plate formation Ok so this is our experience So I recommend everyone If you can Just outside London Want to play some other towns You can rent a car in the London city center Then like us Come back to the airport on the last day Slow down So we don't have to go to Uber again Because it costs 100 yuan to go there Yes yes yes 200 like this So less than 100 is also a savings Yes pretty good Ok, for your reference Good Then continue to continue the RT daily I just today is our last day But I just put this The last clip I want to say that the rental car is a complete one So I will continue to follow the sun for several days Will continue to show everyone We are riding a car Where did you go to play? Bye bye\n" ] } ] }, { "cell_type": "markdown", "source": [ "# **語音輸入MP3檔案**" ], "metadata": { "id": "XKMixQyW4QhC" } }, { "cell_type": "code", "source": [ "!pip install sounddevice numpy lameenc" ], "metadata": { "id": "l7yqyy1qbdmv" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "!apt-get install portaudio19-dev" ], "metadata": { "id": "wtgb9Wl46wyq", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "3a075bd7-277d-426d-fda2-f32585d9a446" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Reading package lists... Done\n", "Building dependency tree... Done\n", "Reading state information... 
Done\n", "portaudio19-dev is already the newest version (19.6.0-1.1).\n", "0 upgraded, 0 newly installed, 0 to remove and 18 not upgraded.\n" ] } ] }, { "cell_type": "code", "source": [ "!pip install --upgrade pip" ], "metadata": { "id": "hh6cb_W664qm", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "93642d16-838b-47e9-85dd-a3b36b8a2cfc" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Requirement already satisfied: pip in /usr/local/lib/python3.10/dist-packages (23.1.2)\n", "Collecting pip\n", " Downloading pip-23.3.1-py3-none-any.whl (2.1 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.1/2.1 MB\u001b[0m \u001b[31m13.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hInstalling collected packages: pip\n", " Attempting uninstall: pip\n", " Found existing installation: pip 23.1.2\n", " Uninstalling pip-23.1.2:\n", " Successfully uninstalled pip-23.1.2\n", "Successfully installed pip-23.3.1\n" ] } ] }, { "cell_type": "code", "source": [ "!pip install cython" ], "metadata": { "id": "DGI8C1bQ6-b0", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "489c9cc1-8bd2-4815-ce31-f9a56b1325db" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Requirement already satisfied: cython in /usr/local/lib/python3.10/dist-packages (3.0.4)\n", "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", "\u001b[0m" ] } ] }, { "cell_type": "code", "source": [ "!pip install pyaudio" ], "metadata": { "id": "BTKWpfEJ6_2e", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "f860a86b-a6d3-4b3e-aa45-6ba841521832" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Collecting pyaudio\n", " Downloading PyAudio-0.2.13.tar.gz (46 kB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.8/46.8 kB\u001b[0m \u001b[31m1.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", "Building wheels for collected packages: pyaudio\n", " Building wheel for pyaudio (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", " Created wheel for pyaudio: filename=PyAudio-0.2.13-cp310-cp310-linux_x86_64.whl size=63861 sha256=f152caa1824f02c7e10a0e02f1533781033ce64dc665436f011c7bda3bce7e9d\n", " Stored in directory: /root/.cache/pip/wheels/14/f1/c2/d102b4765a82c5a7bb273998dca7e4a53fc58e9a1a516fda81\n", "Successfully built pyaudio\n", "Installing collected packages: pyaudio\n", "Successfully installed pyaudio-0.2.13\n", "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", "\u001b[0m" ] } ] },
{ "cell_type": "code", "source": [ "import pyaudio\n", "p = pyaudio.PyAudio()\n", "p" ], "metadata": { "id": "iDBdJxq50K3a", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "53901ff6-23b7-4cdd-d8e1-fb2d48a4bdd0" }, "execution_count": null, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "" ] }, "metadata": {}, "execution_count": 12 } ] },
{ "cell_type": "code", "source": [ "pyaudio.__version__" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 35 }, "id": "FOYzd_bZUOYZ", "outputId": "339daa34-27da-4565-9221-b6da06359c76" }, "execution_count": null, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ "'0.2.13'" ], "application/vnd.google.colaboratory.intrinsic+json": { "type": "string" } }, "metadata": {}, "execution_count": 16 } ] },
{ "cell_type": "code", "source": [ "import pyaudio\n", "import time\n", "import threading\n", "import wave\n", "\n",
"# Note: this recorder needs a local microphone; a Colab VM usually has no audio device,\n", "# so run it on a local machine (if the import fails right after installing pyaudio, restart the runtime).\n", "\n",
"class Recorder():\n", "    def __init__(self, chunk=1024, channels=1, rate=64000):\n", "        self.CHUNK = chunk\n", "        self.FORMAT = pyaudio.paInt16\n", "        self.CHANNELS = channels\n", "        self.RATE = rate\n", "        self._running = True\n", "        self._frames = []\n", "\n",
"    def start(self):\n", "        # Record on a background thread so stop() can be called from the main thread.\n", "        threading.Thread(target=self.__recording, daemon=True).start()\n", "\n",
"    def __recording(self):\n", "        self._running = True\n", "        self._frames = []\n", "        p = pyaudio.PyAudio()\n", "        stream = p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)\n", "        while self._running:\n", "            data = stream.read(self.CHUNK)\n", "            self._frames.append(data)\n", "\n", "        stream.stop_stream()\n", "        stream.close()\n", "        p.terminate()\n", "\n",
"    def stop(self):\n", "        self._running = False\n", "\n",
"    def save(self, filename):\n", "        p = pyaudio.PyAudio()\n", "        if not filename.endswith(\".wav\"):\n", "            filename = filename + \".wav\"\n", "        wf = wave.open(filename, 'wb')\n", "        wf.setnchannels(self.CHANNELS)\n", "        wf.setsampwidth(p.get_sample_size(self.FORMAT))\n", "        wf.setframerate(self.RATE)\n", "        wf.writeframes(b''.join(self._frames))\n", "        wf.close()\n", "        p.terminate()\n", "        print(\"Saved\")\n", "\n",
"if __name__ == \"__main__\":\n", "\n", "    for i in range(1, 4):\n", "        a = int(input('Enter 1 to start recording: '))\n", "        if a == 1:\n", "            rec = Recorder()\n", "            begin = time.time()\n", "            print(\"Start recording\")\n", "            rec.start()\n", "            b = int(input('Enter 2 to stop recording: '))\n", "            if b == 2:\n", "                print(\"Stop recording\")\n", "                rec.stop()\n", "                fina = time.time()\n", "                t = fina - begin\n", "                print('Recording length: %d s' % t)\n", "                rec.save(\"1_%d.wav\" % i)\n"
], "metadata": { "id": "88gbB1uv77BT", "colab": { "base_uri": "https://localhost:8080/", "height": 381 }, "outputId": "5eaa93e2-5218-4eae-9c04-b4991c163012" }, "execution_count": null, "outputs": [ { "output_type": "error", "ename": "ModuleNotFoundError", "evalue": "ignored", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mpyaudio\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0mthreading\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mwave\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'pyaudio'", "", "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n" ], "errorDetails": { "actions": [ { "action": "open_url", "actionText": "Open Examples", "url": "/notebooks/snippets/importing_libraries.ipynb" } ] } } ] }, { "cell_type": "markdown", "source": [ "# **從wav轉成MP3檔案**" ], "metadata": { "id": "lNqb-16T7Tdi" } }, { "cell_type": "code", "source": [ "import pyaudio\n", "pa = pyaudio.PyAudio()\n", "pa.get_default_output_device_info()" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 335 }, "id": "pZc-vIJI95gT", "outputId": "813aa148-d9b1-46a2-bd39-fe324fdcf388" }, "execution_count": null, "outputs": [ { "output_type": "error", "ename": "OSError", "evalue": "ignored", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mOSError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpyaudio\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0mpa\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpyaudio\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mPyAudio\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mpa\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_default_output_device_info\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/pyaudio/__init__.py\u001b[0m in \u001b[0;36mget_default_output_device_info\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 822\u001b[0m \u001b[0;34m:\u001b[0m\u001b[0mrtype\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 823\u001b[0m \"\"\"\n\u001b[0;32m--> 824\u001b[0;31m \u001b[0mdevice_index\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpa\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_default_output_device\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 825\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_device_info_by_index\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice_index\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 826\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mOSError\u001b[0m: No Default Output Device Available" ] } ] }, { "cell_type": "code", "source": [ "!pip install pydub" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "_acE0NVc7no4", "outputId": "706862fb-d315-457a-9a70-2d8ac93ee89a" }, "execution_count": null, "outputs": [ { "output_type": 
"stream", "name": "stdout", "text": [ "Collecting pydub\n", " Downloading pydub-0.25.1-py2.py3-none-any.whl (32 kB)\n", "Installing collected packages: pydub\n", "Successfully installed pydub-0.25.1\n", "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", "\u001b[0m" ] } ] }, { "cell_type": "code", "source": [], "metadata": { "id": "dR5XxraszWOg" }, "execution_count": null, "outputs": [] } ] }