{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e4845372-68f1-4f31-bf33-6c1f931bf695",
   "metadata": {
    "editable": true,
    "slideshow": {
     "slide_type": ""
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "3.8.10 (default, May 26 2023, 14:05:08) \n",
      "[GCC 9.4.0]\n",
      "/usr/bin/python3\n"
     ]
    }
   ],
   "source": [
    "import sys\n",
    "\n",
    "\n",
    "print(sys.version)\n",
    "print(sys.executable)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "90440576-86e7-4564-b71f-a7343d05b3c5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# %pip install nemo-toolkit==1.20.0\n",
    "# %pip install pytorch-lightning==1.9.5\n",
    "# %pip install nemo2riva==2.12.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "586b613d-d0a7-490b-9f7d-5f77881b2d7c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "NVIDIA_API_KEY = \"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\"\n",
    "os.environ[\"NVIDIA_API_KEY\"] = NVIDIA_API_KEY\n",
    "\n",
    "AUTODL_HOST=\"connect.bjc1.seetacloud.com\"\n",
    "AUTODL_PORT=57160\n",
    "AUTODL_USER=\"root\"\n",
    "AUTODL_PASSWORD=\"ZLWtw0iQsk5i1\"\n",
    "\n",
    "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
    "os.environ[\"LANGCHAIN_ENDPOINT\"] = \"https://api.smith.langchain.com\"\n",
    "os.environ[\"LANGCHAIN_API_KEY\"] = \"lsv2_pt_6183eee2addf499f8fd879fc13e69c71_9e61bc7ffc1\"\n",
    "os.environ[\"LANGCHAIN_PROJECT\"] = \"beauty_assistant\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "d04a625b-c2c5-4499-9b19-5f938e06d4ba",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/nvidia/.local/lib/python3.8/site-packages/langchain_nvidia_ai_endpoints/_common.py:208: UserWarning: Found deepseek-ai/deepseek-r1 in available_models, but type is unknown and inference may fail.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
     "from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings\n",
     "from langchain_nvidia_ai_endpoints import ChatNVIDIA\n",
     "\n",
     "# Embedding model backing the FAISS vector store.\n",
     "embedder = NVIDIAEmbeddings(model=\"NV-Embed-QA\")#ai-embed-qa-4\n",
     "# Chat model used by every LLM step (router, grader, generation, translation).\n",
     "# NOTE(review): deepseek-r1 replies include a <think>...</think> trace that\n",
     "# downstream code strips.\n",
     "llm_deepseek_r1 = ChatNVIDIA(\n",
     "      model=\"deepseek-ai/deepseek-r1\",\n",
     "      temperature=0.6,\n",
     "      top_p=0.7,\n",
     "      max_tokens=4096,\n",
     "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "2c4c3531-9138-4a2d-8f41-7496d9f0c5c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "from utility import print_with_time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "062ecc90-455a-4b93-96b8-0ad231618a63",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
    "from langchain.vectorstores import FAISS\n",
    "from utility import LoadPDF\n",
    "\n",
    "def init_vector_store():\n",
    "    have_store=False\n",
    "    docs=LoadPDF(\"./kdoc\")\n",
    "    for doc in docs:\n",
    "        #print_with_time(doc)\n",
    "        text_splitter = RecursiveCharacterTextSplitter(\n",
    "            # Set a really small chunk size, just to show.\n",
    "            chunk_size=100,\n",
    "            chunk_overlap=20,\n",
    "            length_function=len,\n",
    "            is_separator_regex=False,\n",
    "        )\n",
    "        texts = text_splitter.split_documents(doc)\n",
    "        print_with_time(texts[0])\n",
    "        if not have_store:\n",
    "            store = FAISS.from_documents(texts, embedder)\n",
    "            have_store=True\n",
    "        else:\n",
    "            tmp_store=FAISS.from_documents(texts, embedder)\n",
    "            store.merge_from(tmp_store)\n",
    "    store.save_local('./embedding')\n",
    "    \n",
    "# 初始向量数据库，只需执行一次\n",
    "# init_vector_store()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "86440b2b-f073-4228-ae6c-0b8dacdf6a04",
   "metadata": {},
   "outputs": [],
   "source": [
    "import subprocess\n",
    "\n",
    "def asr(state):\n",
    "    audio_file=state[\"question_audio\"]\n",
    "    cmd=f\"\"\"python3 python-clients/scripts/asr/transcribe_file_offline.py --server grpc.nvcf.nvidia.com:443 --use-ssl  --metadata function-id \"b702f636-f60c-4a3d-a6f4-f3568c13bd7d\" --metadata \"authorization\" \"Bearer {NVIDIA_API_KEY}\"  --language-code en --input-file {audio_file}\"\"\"\n",
    "    print_with_time(cmd)\n",
    "    process  = subprocess.Popen(cmd, shell=True,\n",
    "stdout=subprocess.PIPE,\n",
    "stderr=subprocess.PIPE,\n",
    "encoding='utf-8')\n",
    "    output, errors = process.communicate()\n",
    "    print( output, errors )\n",
    "    arr=output.split(\"Final transcript: \")\n",
    "    if(len(arr)>=2):\n",
    "        return {\"question\":arr[1],\"messages\": [{\"role\": \"user\", \"content\": arr[1]}]}\n",
    "    else:\n",
    "        print_with_time( output )\n",
    "        raise ValueError(\"asr出现错误\")\n",
    "    \n",
    "# s={\"question_audio\":\"./test.wav\"}\n",
    "# r=asr(s)\n",
    "# print(r)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "b4ffbea2-0d54-40cb-8966-3373a19801ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "import uuid\n",
    "from utility import is_file_empty\n",
    "\n",
    "def translate(str):\n",
    "    prompt = ChatPromptTemplate.from_messages(\n",
    "        [\n",
    "            (\n",
    "                \"system\",\n",
    "                \"\"\"你是一个中英文翻译专家，将用户输入的中文翻译成英文。直接翻译，不要解析内容。请简洁回答，不重复用户问题，并控制在100字以内。\n",
    "                \"\"\",\n",
    "            ),\n",
    "            (\"user\", f\"{str}\"),\n",
    "        ]\n",
    "    )\n",
    "    \n",
    "    chain = (\n",
    "        prompt\n",
    "        | llm_deepseek_r1\n",
    "        | StrOutputParser()\n",
    "    )\n",
    "    result=chain.invoke({})\n",
    "    # print_with_time(result)\n",
    "    arr=result.split(\"</think>\")\n",
    "    # print_with_time(arr[-1])\n",
    "    if len(arr[-1])<=0:\n",
    "        print_with_time(result)\n",
    "        raise ValueError(\"中文翻译英文出现错误\")\n",
    "    return arr[-1]\n",
    "    \n",
    "# print(translate(\"春景短詩，捕捉梅香鳥驚、風過花落的瞬間，末句化用「花開花落知多少」感時傷逝，意象靈動，餘韻悠長。\"))\n",
    "\n",
    "def tts(state):\n",
    "    answer=state[\"answer\"].replace(\"'\",\"\").replace('\"','')\n",
    "    if not answer.isalpha():\n",
    "        answer=translate(answer) #接口不支持中文j\n",
    "    audio_path=\"./audio\"\n",
    "    if not os.path.exists(audio_path):\n",
    "        os.makedirs(audio_path)\n",
    "    output_audio=f\"{audio_path}/{uuid.uuid4()}.wav\"\n",
    "    cmd=f\"\"\"python3 python-clients/scripts/tts/talk.py  --server grpc.nvcf.nvidia.com:443 --use-ssl --metadata function-id \"5e607c81-7aa6-44ce-a11d-9e08f0a3fe49\"  --metadata authorization \"Bearer {NVIDIA_API_KEY}\" --text \"{answer}\" --voice \"English-US-RadTTS.Female-1\" --output {output_audio}\"\"\"\n",
    "    print_with_time(cmd)\n",
    "    process  = subprocess.Popen(cmd, shell=True,\n",
    "stdout=subprocess.PIPE,\n",
    "stderr=subprocess.PIPE,\n",
    "encoding='utf-8')\n",
    "    output, errors = process.communicate()\n",
    "    if is_file_empty(output_audio):\n",
    "        print_with_time( output, errors )\n",
    "        raise ValueError(\"tts出现错误\")\n",
    "    return  {\"answer_audio\":output_audio}\n",
    "\n",
    "# s={\"answer\":\"春景短詩，捕捉梅香鳥驚、風過花落的瞬間，末句化用「花開花落知多少」感時傷逝，意象靈動，餘韻悠長。\"}\n",
    "# r=tts(s)\n",
    "# print(r)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "0dd3c14d-70a9-4afd-a117-663dcfef588c",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/lib/python3/dist-packages/paramiko/transport.py:219: CryptographyDeprecationWarning: Blowfish has been deprecated and will be removed in a future release\n",
      "  \"class\": algorithms.Blowfish,\n"
     ]
    }
   ],
   "source": [
    "import paramiko\n",
    "import traceback\n",
    "\n",
    "def audio2face(state):\n",
    "    try:\n",
    "        answer_audio=state[\"answer_audio\"]\n",
    "        ssh = paramiko.SSHClient()\n",
    "        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n",
    "        ssh.connect(hostname=AUTODL_HOST, port=AUTODL_PORT, username=AUTODL_USER, password=AUTODL_PASSWORD)\n",
    "    \n",
    "        file_name = os.path.basename(answer_audio)\n",
    "        remote_audio_file=f\"/root/Sonic/examples/wav/{file_name}\"\n",
    "        # 创建SFTP客户端\n",
    "        sftp = paramiko.SFTPClient.from_transport(ssh.get_transport())\n",
    "        # 上传文件 /root/autodl-tmp/\n",
    "        sftp.put(answer_audio, remote_audio_file)\n",
    "        # 关闭SFTP客户端\n",
    "        sftp.close()\n",
    "\n",
    "        file_name=f\"{uuid.uuid4()}.mp4\"\n",
    "        remote_video_file=f\"/root/Sonic/examples/wav/{file_name}\"\n",
    "        cmd=f\"/root/miniconda3/bin/python /root/Sonic/demo.py '/root/Sonic/examples/image/hair.png' '{remote_audio_file}' '{remote_video_file}'\"\n",
    "        print_with_time(cmd)\n",
    "        # 执行远程命令\n",
    "        # stdin, stdout, stderr = ssh.exec_command(\"sudo su\")\n",
    "        # stdin.write(f\"{AUTODL_PASSWORD}\\n\")\n",
    "        # stdin.flush()\n",
    "        stdin, stdout, stderr = ssh.exec_command(cmd)\n",
    "        # 打印命令输出\n",
    "        print(\"stdout\",stdout.read().decode())\n",
    "        print(\"stderr\",stderr.read().decode())\n",
    "        \n",
    "        video_path=\"./video\"\n",
    "        if not os.path.exists(video_path):\n",
    "            os.makedirs(video_path)\n",
    "        local_video_file=f\"{video_path}/{file_name}\"\n",
    "         # 创建SFTP客户端\n",
    "        sftp = paramiko.SFTPClient.from_transport(ssh.get_transport())\n",
    "        # 下载文件 /root/autodl-tmp/\n",
    "        sftp.get(remote_video_file, local_video_file)\n",
    "        # 关闭SFTP客户端\n",
    "        sftp.close()\n",
    "        \n",
    "        ssh.close()\n",
    "\n",
    "        if is_file_empty(local_video_file):\n",
    "            print(\"stdout\",stdout.read().decode())\n",
    "            print(\"stderr\",stderr.read().decode())\n",
    "            raise ValueError(\"audio2face出现错误\")\n",
    "        return  {\"answer_video\":local_video_file}\n",
    "    except Exception as e:\n",
    "        error_msg = traceback.format_exc()\n",
    "        print_with_time(error_msg)\n",
    "        raise ValueError(\"audio2face出现错误\")\n",
    "        \n",
    "    return  {\"answer_video\":\"\"}\n",
    "    \n",
    "\n",
    "# s=State(answer_audio=\"./audio/c07eaa93-66ae-41df-8c82-64a6dd6be988.wav\")\n",
    "# r=audio2face(s)\n",
    "# print(r)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "7534053f-b80a-4332-9d93-f8022ba5a5d4",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Router\n",
     "from langchain_core.output_parsers import StrOutputParser\n",
     "from langchain.output_parsers import PydanticOutputParser\n",
     "from langchain_core.prompts import ChatPromptTemplate\n",
     "from utility import PPrint\n",
     "from entity import RouteQuery\n",
     "\n",
     "def route_question(state):\n",
     "    \"\"\"\n",
     "    Route question to direct answer or RAG.\n",
     "\n",
     "    Args:\n",
     "        state (dict): The current graph state\n",
     "\n",
     "    Returns:\n",
     "        str: Next node to call\n",
     "    \"\"\"\n",
     "    print_with_time(\"route_question\")\n",
     "    \n",
     "    # The JSON schema is spelled out inside the prompt because the model is\n",
     "    # not bound to structured output here; the parser below validates it.\n",
     "    system = \"\"\"You are an expert at routing a user question to a vectorstore or web search.\n",
     "    The vectorstore contains documents related to internal company information, IT technical knowledge, and adversarial attacks.\n",
     "    Use the vectorstore for questions on these topics. Otherwise, use direct_answer.\n",
     "    以 JSON 的形式输出，输出的 JSON 需遵守以下的格式：\n",
     "\n",
     "    {{\n",
     "      \"datasource\": <数据来源，值为 vectorstore 或者 direct_answer>\n",
     "    }}\"\"\"\n",
     "    route_prompt = ChatPromptTemplate.from_messages(\n",
     "        [\n",
     "            (\"system\", system),\n",
     "            (\"human\", \"{question}\"),\n",
     "        ]\n",
     "    )\n",
     "    # print(route_prompt)\n",
     "    \n",
     "    # Parse the model's JSON reply into a RouteQuery (field: datasource).\n",
     "    structure_parser = PydanticOutputParser(pydantic_object=RouteQuery)\n",
     "    # print(structure_parser.get_format_instructions())\n",
     "\n",
     "    chain = (\n",
     "        route_prompt\n",
     "        | llm_deepseek_r1 \n",
     "        # | PPrint()\n",
     "        | structure_parser\n",
     "        # | StrOutputParser()\n",
     "    )\n",
     "    question = state[\"question\"]\n",
     "    source =chain.invoke({\"question\": question})\n",
     "    if source.datasource == \"vectorstore\":\n",
     "        return \"vectorstore\"\n",
     "    else:\n",
     "        return \"direct_answer\"\n",
     "\n",
     "# s=State(question=\"手语识别(SLR)技术是什么\")\n",
     "# print(route_question(s))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e7862ef9-5d26-4372-a31f-4139e497b248",
   "metadata": {},
   "outputs": [],
   "source": [
    "# \n",
    "from langchain.vectorstores import FAISS\n",
    "\n",
    "def retrieve(state):\n",
    "    print_with_time(\"retrieve\")\n",
    "    \n",
    "    question = state[\"question\"]\n",
    "\n",
    "    store = FAISS.load_local(\"./embedding\", embedder,allow_dangerous_deserialization=True)\n",
    "    retriever = store.as_retriever(search_kwargs={\"k\": 3})\n",
    "    documents  = retriever.invoke(question)\n",
    "    return {\"documents\": documents }\n",
    "\n",
    "# s=State(question=\" 不a见\")\n",
    "# print(retrieve(s))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "fffcf162-928f-40e9-ae9e-abc808e389d7",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Retrieval Grader\n",
     "from entity import GradeDocuments\n",
     "\n",
     "def grade_documents(state):\n",
     "    \"\"\"Filter state[\"documents\"], keeping only those the LLM grades as\n",
     "    relevant to state[\"question\"].\n",
     "    \"\"\"\n",
     "    print_with_time(\"grade_documents\")\n",
     "    \n",
     "    question=state[\"question\"]\n",
     "    documents=state[\"documents\"]\n",
     "    \n",
     "    # Prompt\n",
     "    system = \"\"\"You are a grader assessing relevance of a retrieved document to a user question. \\n \n",
     "        If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \\n\n",
     "        It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \\n\n",
     "        Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.\n",
     "        以 JSON 的形式输出，输出的 JSON 需遵守以下的格式：\n",
     "\n",
     "        {{\n",
     "          \"binary_score\": <相关度，值为 yes 或者 no>\n",
     "        }}\"\"\"\n",
     "    grade_prompt = ChatPromptTemplate.from_messages(\n",
     "        [\n",
     "            (\"system\", system),\n",
     "            (\"human\", \"Retrieved document: \\n\\n {document} \\n\\n User question: {question}\"),\n",
     "        ]\n",
     "    )\n",
     "\n",
     "    # Parse the model's JSON reply into a GradeDocuments (field: binary_score).\n",
     "    structure_parser = PydanticOutputParser(pydantic_object=GradeDocuments)\n",
     "    # print(structure_parser.get_format_instructions())\n",
     "\n",
     "    chain = (\n",
     "        grade_prompt\n",
     "        | llm_deepseek_r1 \n",
     "        # | PPrint()\n",
     "        | structure_parser\n",
     "        # | StrOutputParser()\n",
     "    )\n",
     "    \n",
     "    # Grade each document independently; keep it only on a \"yes\".\n",
     "    filtered_docs=[]\n",
     "    for d in documents:\n",
     "        score=chain.invoke({\"question\": question, \"document\": d.page_content})\n",
     "        # print(d,score)\n",
     "        grade=score.binary_score\n",
     "        if grade==\"yes\":\n",
     "            filtered_docs.append(d)\n",
     "            \n",
     "        # break\n",
     "            \n",
     "    return {\"documents\": filtered_docs }\n",
     "\n",
     "# s=State(question=\"手语识别(SLR)技术是什么\")\n",
     "# docs=retrieve(s)\n",
     "# s=State(question=\"手语识别(SLR)技术是什么\",documents= docs[\"documents\"])\n",
     "# print(grade_documents(s))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "14291fb5-d74c-40f2-8aee-c3e6d45848e6",
   "metadata": {},
   "outputs": [],
   "source": [
    "def decide_to_generate(state):\n",
    "    print_with_time(\"decide_to_generate\")\n",
    "    \n",
    "    documents=state[\"documents\"]\n",
    "    if(len(documents)>0):\n",
    "        return \"generate\"\n",
    "    else:\n",
    "        return \"direct_answer\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "122a157d-d1ac-4941-82b4-e304a5f65b98",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate\n",
    "def generate(state):\n",
    "    print_with_time(\"generate\")\n",
    "    \n",
    "    question=state[\"question\"]\n",
    "    documents=state[\"documents\"]\n",
    "    prompt = ChatPromptTemplate.from_messages(\n",
    "        [\n",
    "            (\n",
    "                \"system\",\n",
    "                \"\"\"Please provide a concise answer without repeating the user's question, keeping it under 100 characters. answer solely based on the following context:\\n<Documents>\\n{context}\\n</Documents>.\n",
    "                \"\"\",\n",
    "            ),\n",
    "            (\"user\", \"{question}\"),\n",
    "        ]\n",
    "    )\n",
    "    \n",
    "    chain = (\n",
    "        prompt\n",
    "        | llm_deepseek_r1\n",
    "        | StrOutputParser()\n",
    "    )\n",
    "    result=chain.invoke({\"context\": documents, \"question\": question})\n",
    "    arr=result.split()\n",
    "    if len(arr[-1])<=0:\n",
    "        print_with_time(result)\n",
    "        raise ValueError(\"generate出现错误\")\n",
    "    return {\"answer\": arr[-1] ,\"messages\": [{\"role\": \"assistant\", \"content\": arr[-1]}]}\n",
    "    \n",
    "# s=State(question=\"手语识别(SLR)技术是什么\")\n",
    "# docs=retrieve(s)\n",
    "# s=State(question=\"手语识别(SLR)技术是什么\",documents= docs[\"documents\"])\n",
    "# print(generate(s))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "f10f7fe5-88ed-47c0-adb7-fc46f493735b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def direct_answer(state):\n",
    "    print_with_time(\"direct_answer\")\n",
    "    \n",
    "    question=state[\"question\"]\n",
    "    prompt = ChatPromptTemplate.from_messages(\n",
    "        [\n",
    "            (\n",
    "                \"system\",\n",
    "                \"\"\"Please provide a concise answer without repeating the user's question, keeping it under 100 characters. \n",
    "                \"\"\",\n",
    "            ),\n",
    "            (\"user\", \"{question}\"),\n",
    "        ]\n",
    "    )\n",
    "    \n",
    "    chain = (\n",
    "        prompt\n",
    "        | llm_deepseek_r1\n",
    "        | StrOutputParser()\n",
    "    )\n",
    "    result=chain.invoke({\"question\": question})\n",
    "    arr=result.split()\n",
    "    if len(arr[-1])<=0:\n",
    "        print_with_time(result)\n",
    "        raise ValueError(\"direct_answer出现错误\")\n",
    "    return {\"answer\": arr[-1] ,\"messages\": [{\"role\": \"assistant\", \"content\": arr[-1]}]}\n",
    "\n",
    "# s=State(question=\"手语识别(SLR)技术是什么\")\n",
    "# print(direct_answer(s))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "f6f9c4ad-ab69-4ff9-975a-4d3df3c835fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.schema.runnable import RunnableLambda\n",
    "from langchain.schema.runnable.passthrough import RunnableAssign\n",
    "from langchain_core.runnables import RunnableBranch\n",
    "\n",
    "route_question_branch=RunnableBranch(\n",
    "  (lambda x: x.get('route') ==\"vectorstore\", RunnableLambda(retrieve)),\n",
    "    lambda x: x\n",
    ")\n",
    "chain =(\n",
    "     RunnableLambda(asr)\n",
    "    |RunnableAssign({\"route\":RunnableLambda(route_question)})\n",
    "    |route_question_branch\n",
    "    |RunnableLambda(direct_answer) \n",
    "    |RunnableLambda(tts)\n",
    "    |RunnableLambda(audio2face)\n",
    ")\n",
    "\n",
    "def handle(audio):\n",
    "    print_with_time(\"开始处理\")\n",
    "\n",
    "    # yield \"./video/prologue.mp4\",\"等待响应\"\n",
    "    try:\n",
    "        \n",
    "        video=\"\"\n",
    "        answer=\"\"\n",
    "        for chunk  in chain.stream({\"question_audio\":audio}):\n",
    "            print(chunk)\n",
    "            # for value in event.values():\n",
    "            #     print_with_time(\"=\"*100)\n",
    "            #     print_with_time(value)\n",
    "            #     if value.get(\"answer_video\"):\n",
    "            #         video=value[\"answer_video\"]\n",
    "            #     if value.get(\"answer\"):\n",
    "            #         answer=value[\"answer\"]\n",
    "        if video==\"\":\n",
    "            video=\"./video/prologue.mp4\"\n",
    "        return video,answer\n",
    "    except Exception as e:\n",
    "        error_msg = traceback.format_exc()\n",
    "        print_with_time(error_msg)\n",
    "        return \"./video/prologue.mp4\",e.args\n",
    "\n",
    "    print_with_time(\"处理结束\")\n",
    "    \n",
    "# handle(\"./test.wav\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "62a2874e-3ed6-4e23-b9a3-7dc03051dc28",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "IMPORTANT: You are using gradio version 4.31.1, however version 4.44.1 is available, please upgrade.\n",
      "--------\n",
      "Running on local URL:  http://0.0.0.0:5000\n",
      "\n",
      "To create a public link, set `share=True` in `launch()`.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div><iframe src=\"http://localhost:5000/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "import gradio as gr\n",
     "\n",
     "\n",
     "# Gradio UI: avatar video + answer text on top, microphone input below.\n",
     "with gr.Blocks(title=\"Galaxie\") as block:\n",
     "    gr.HTML(\n",
     "        f\"\"\"\n",
     "        <h1 style='text-align: center;'>Galaxie </h1>\n",
     "        <h3 style='text-align: center;'> 基于NIM构建的语音转3D虚拟人（A2F-3D） </h3>\n",
     "        <p style='text-align: center;'> Powered by <a href=\"https://github.com/huggingface/parler-tts\"> Bolt</a>\n",
     "        \"\"\"\n",
     "    )\n",
     "    \n",
     "    # with gr.Row():\n",
     "    #     with gr.Column(scale=1):\n",
     "    #         audio_in = gr.Audio(label=\"我\", sources=\"microphone\",type=\"filepath\")\n",
     "    #     with gr.Column(scale=4):\n",
     "    #         with gr.Group():\n",
     "    #             audio_out = gr.Video(label=\"美女\",autoplay=True,value=\"./video/prologue.mp4\",height=500)\n",
     "    #             answer = gr.Textbox(show_label=False)\n",
     "    with gr.Group():\n",
     "        audio_out = gr.Video(label=\"Galaxie\",autoplay=True,value=\"./video/prologue.mp4\",height=400)\n",
     "        answer = gr.Textbox(show_label=False)\n",
     "    audio_in = gr.Audio(label=\"我\", sources=\"microphone\",type=\"filepath\")\n",
     "    \n",
     "        \n",
     "    # Run the full pipeline whenever the user stops recording.\n",
     "    audio_in.stop_recording(handle, audio_in, [audio_out,answer ])\n",
     "\n",
     "block.launch(debug=True, share=False, show_api=False, server_port=5000, server_name=\"0.0.0.0\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83e883ff-83ee-49f0-8aad-00ac3f868048",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
