{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "5df9c6a2-f7c3-48b6-8d74-7c84f8b12e16",
   "metadata": {},
   "outputs": [
    {
     "name": "stdin",
     "output_type": "stream",
     "text": [
      "API KEY:  ········\n"
     ]
    }
   ],
   "source": [
    "import getpass\n",
    "\n",
    "api_key = getpass.getpass(\"API KEY: \")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "49c6a319-bee5-4c60-8d78-fc29276ad5d5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content='你好！很高兴见到你。有什么我可以帮你的吗？', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 12, 'prompt_tokens': 13, 'total_tokens': 25, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'qwen3-30b-a3b', 'system_fingerprint': None, 'id': 'chatcmpl-401f8f97-ecab-9b6c-be9e-98f832ef6dab', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None}, id='run--1970e90e-8592-42d8-a2c9-a288cc0c2239-0', usage_metadata={'input_tokens': 13, 'output_tokens': 12, 'total_tokens': 25, 'input_token_details': {}, 'output_token_details': {}})"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(\n",
    "    api_key=api_key,\n",
    "    base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "    model=\"qwen3-30b-a3b\",\n",
    "    # other params...\n",
    "    extra_body={\"enable_thinking\": False}\n",
    ")\n",
    "\n",
    "llm.invoke(\"你好\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "a8685846-4630-4930-887e-f4401e9c0f5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "json_llm = llm.bind(response_format={\"type\": \"json_object\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "78dfdc77-0b17-4ebe-a32b-17989958f916",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "thinker_system_msg = \\\n",
    "\"\"\"# 任务角色\n",
    "你是一位帮助用户解读论文的论文阅读助手。针对用户对你的提问，基于论文基本信息（包含标题、作者、关键字等）和论文摘要提取信息，你主要负责处理解答用户问题的一些前置工作，为后续模块的回答问题做准备，后续会将论文基本信息（包含标题、作者、关键字等）、论文摘要提取信息、论文片段节选以及你的输出，提供给问题回答模块，用于帮助解答用户的提问。\n",
    "\n",
    "# 你的主要工作\n",
     "1、理解用户的提问，将用户的问题转化成精炼、准确、方便解答的一个问题。如果用户的问题过于复杂，包含可分解的多个角度的问题，可以任选一个角度回答（优先核心问题），同时生成建议，建议后续模块解答时，向用户指出问题过于复杂，可以分解成哪些子问题，已解答其中的哪些，建议用户后续如何询问，引导用户后面继续发问；\n",
     "2、提供解答用户问题的思路和建议，引导后续回答用户问题的模块作答。若第 1 步中分解了用户的问题，应指出对于问题分解的思考，并在这里建议后续模块回答问题时，引导用户继续提问（比如：“对于您的问题，我理解如下...，我将您的问题分解成...这些子问题，我现在对...这个问题进行详细解答，您可以后续继续向我咨询...这些问题”）；\n",
    "3、基于第 1 步提出的问题，和第 2 步的思考，判断后续是否要进行RAG检索工作。所谓RAG检索工作，即提供检索语句，编码生成向量后，通过语义比较（采用余弦相似度COSINE方法）检索出语义相近的论文片段。如果需要论文原文中的内容片段回答问题，需要此步骤（通常和论文主题相关的内容都需要此步骤）；\n",
     "4、提供RAG检索语句。要求只提供一条语句，和需要RAG检索到的内容余弦相似度COSINE值较大，可结合论文摘要提取信息、第 1 步提出的问题、第 2 步生成的建议中获取启发。若第 3 步判断不需要RAG检索则写“无”。\n",
    "\n",
    "# 输入格式\n",
    "论文基本信息：{此处填入论文标题、作者、关键词等}\n",
    "论文摘要提取：{此处填入论文研究主题、现有研究存在的问题（研究背景痛点）、研究目标、核心方法/技术手段、关键实验/研究结果、研究意义/价值、是否值得后续阅读的初步判断等}\n",
    "用户提问：{此处填入用户的问题}\n",
    "\n",
     "# 输出格式（严格 JSON）\n",
     "仅允许输出以下结构的 JSON，无额外文字：\n",
     "{\n",
     "\"question\": \"提供主要工作中第 1 步工作转化后的问题\",\n",
     "\"thinking\": \"提供主要工作中第 2 步解答用户问题的思路和建议，用于引导后续回答用户问题的模块作答\",\n",
     "\"need_rag\": \"YES\" 或 \"NO\",\n",
     "\"rag_sentence\": \"若 need_rag 为 YES：说明需要RAG检索步骤，这里提供语义相关的检索语句；若 need_rag 为 NO，说明不需要RAG检索步骤，返回空字符串\"\n",
     "}\n",
     "# 约束\n",
     "question、rag_sentence 少于 250 字，语言简洁；thinking 字数需少于 1200 字。\n",
    "\"\"\"\n",
    "\n",
    "thinker_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"论文基本信息：{paper_basic_info}\n",
    "论文摘要提取：{paper_abstract_summary}\n",
    "用户提问：{user_question}\n",
    "\"\"\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "da84c0e5-7bed-474a-82dc-029f6915fb66",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '这篇论文的主要研究内容和核心贡献是什么？', 'thinking': '用户的问题是关于论文整体内容的概述。我将问题转化为询问论文的研究主题、核心方法和关键结果。该问题可以分解为：1）研究主题是什么；2）采用了哪些核心方法；3）取得了哪些关键成果。目前可先回答研究主题和核心方法，建议用户后续进一步询问具体实验结果或技术细节。', 'need_rag': 'NO', 'rag_sentence': ''}\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "paper_basic_info = \"'paper_basic_info': {'title': 'StableAvatar: Infinite-Length Audio-Driven Avatar Video Generation', 'authors': 'Shuyuan Tu；Yueming Pan；Yinming Huang；Xintong Han；Zhen Xing；Qi Dai；Chong Luo；Zuxuan Wu；Yu-Gang Jiang', 'keywords': 'Audio-driven avatar video generation；Infinite-length；Diffusion model；Identity consistency；Audio synchronization'\"\n",
    "paper_abstract_summary = \"'research_topic': 'Infinite-length audio-driven avatar video generation with identity and audio synchronization', 'existing_problem': 'Current diffusion models for audio-driven avatar video generation struggle to synthesize long videos with natural audio synchronization and identity consistency; they are limited to generating short videos of less than 15 seconds, and when generating longer videos, significant body distortions and appearance inconsistencies occur, particularly in facial regions', 'research_goal': 'Propose StableAvatar, an end-to-end video diffusion transformer that synthesizes infinite-length high-quality videos without post-processing while maintaining identity consistency and audio synchronization', 'core_method': 'Introduce a Time-step-aware Audio Adapter to prevent error accumulation via time-step-aware modulation; propose an Audio Native Guidance Mechanism to enhance audio synchronization during inference; implement a Dynamic Weighted Sliding-window Strategy to improve smoothness of infinite-length videos', 'key_result': 'Experiments show the effectiveness of StableAvatar both qualitatively and quantitatively in addressing quality drift and improving audio synchronization and identity consistency in long videos', 'research_significance': 'StableAvatar addresses the critical challenge of generating high-fidelity, infinite-length audio-driven avatar videos, enabling practical applications in film production and virtual assistants', 'preliminary_judgment': '值得，因研究主题聚焦于音频驱动的无限长度虚拟角色视频生成，核心方法创新且关键结果数据显著，具有重要的应用价值'\"\n",
    "\n",
    "user_question = \"这篇论文讲了些什么？\"\n",
    "test1 = thinker_prompt.invoke({\n",
    "    \"paper_basic_info\": paper_basic_info,\n",
    "    \"paper_abstract_summary\": paper_abstract_summary,\n",
    "    \"user_question\": user_question,\n",
    "}).messages[0].content\n",
    "resp = json_llm.invoke([\n",
    "    {\n",
    "        \"role\": \"system\",\n",
    "        \"content\": thinker_system_msg,\n",
    "    },{\n",
    "        \"role\": \"user\",\n",
    "        \"content\": test1,\n",
    "    }\n",
    "])\n",
    "print(json.loads(resp.content))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "42f2ce17-96be-40fd-9673-d3d3eccb4d7e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '用户询问自身外貌是否帅气，与论文内容无直接关联', 'thinking': '用户的问题与论文主题（音频驱动的无限长度虚拟角色视频生成）无关。建议用户后续可提问关于论文方法、技术细节或应用场景等问题。例如：StableAvatar如何实现身份一致性？或者该研究在哪些领域有实际应用价值？', 'need_rag': 'NO', 'rag_sentence': ''}\n"
     ]
    }
   ],
   "source": [
    "user_question = \"我帅吗？\"\n",
    "test2 = thinker_prompt.invoke({\n",
    "    \"paper_basic_info\": paper_basic_info,\n",
    "    \"paper_abstract_summary\": paper_abstract_summary,\n",
    "    \"user_question\": user_question,\n",
    "}).messages[0].content\n",
    "resp2 = json_llm.invoke([\n",
    "    {\n",
    "        \"role\": \"system\",\n",
    "        \"content\": thinker_system_msg,\n",
    "    },{\n",
    "        \"role\": \"user\",\n",
    "        \"content\": test2,\n",
    "    }\n",
    "])\n",
    "print(json.loads(resp2.content))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "5ac3f63a-b69e-4c5c-96c0-5ef796c9e4c1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': 'StableAvatar如何实现身份一致性？', 'thinking': '用户的问题聚焦于StableAvatar如何实现身份一致性。根据摘要，StableAvatar通过核心方法中的Time-step-aware Audio Adapter、Audio Native Guidance Mechanism以及Dynamic Weighted Sliding-window Strategy来提升视频的连贯性与同步性。建议后续模块在回答时详细解释这些机制如何共同作用以保持身份一致性，并可进一步探讨其在实际应用中的表现。', 'need_rag': 'YES', 'rag_sentence': '如何通过Time-step-aware Audio Adapter和Dynamic Weighted Sliding-window Strategy实现身份一致性'}\n"
     ]
    }
   ],
   "source": [
    "user_question = \"StableAvatar如何实现身份一致性？\"\n",
    "test3 = thinker_prompt.invoke({\n",
    "    \"paper_basic_info\": paper_basic_info,\n",
    "    \"paper_abstract_summary\": paper_abstract_summary,\n",
    "    \"user_question\": user_question,\n",
    "}).messages[0].content\n",
    "resp3 = json_llm.invoke([\n",
    "    {\n",
    "        \"role\": \"system\",\n",
    "        \"content\": thinker_system_msg,\n",
    "    },{\n",
    "        \"role\": \"user\",\n",
    "        \"content\": test3,\n",
    "    }\n",
    "])\n",
    "print(json.loads(resp3.content))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c4bffc9-be6f-4cc9-84df-f2d50a7f1d77",
   "metadata": {},
   "outputs": [],
   "source": [
    "answer_system_msg = \\\n",
    "\"\"\"# 任务角色\n",
    "你是一位帮助用户解读论文的论文阅读助手，能根据提供给你的信息，解答用户问题。\n",
    "\n",
    "# 你的主要工作\n",
    "基于以下信息回答用户提问：\n",
    "1、用户问题\n",
    "2、论文基本信息（包含标题、作者、关键字等）\n",
    "3、论文摘要提取信息（包括论文研究主题、现有研究存在的问题（研究背景痛点）、研究目标、核心方法/技术手段、关键实验/研究结果、研究意义/价值、是否值得后续阅读的初步判断等）\n",
    "4、论文原文片段节选（如果是\"未检索\"，代表前期对用户问题的思考认为不需要论文原文片段节选，回答问题时，可忽略该信息）\n",
    "5、解答用户问题的思路和建议\n",
    "\n",
    "# 输出要求\n",
     "1、输入信息中，“解答用户问题的思路和建议”部分是前期对问题的理解思考，应重点参考\n",
    "2、输出采用markdown格式\n",
    "3、若发现提供的信息无法解答用户问题，应直接回复无法回答用户的问题，可以考虑用幽默风趣的方式承认自己的不足\n",
    "\"\"\"\n",
    "\n",
     "answer_prompt = ChatPromptTemplate.from_template(\\\n",
     "\"\"\"基于历史聊天记录，和以下信息，回答用户问题\n",
     "1、用户问题: {user_question}\n",
     "2、论文基本信息: {paper_basic_info}\n",
     "3、论文摘要提取信息: {paper_abstract_summary}\n",
     "4、论文原文片段节选（如果是\"未检索\"，代表前期思考认为不需要论文原文片段节选，回答问题时，可忽略该信息）: {paper_excerpt}\n",
     "5、解答用户问题的思路和建议: {thinking}\n",
     "\"\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "6a170205-927f-45cc-bc75-f1a2ea7bcf7f",
   "metadata": {},
   "outputs": [
    {
     "ename": "JSONDecodeError",
     "evalue": "Expecting property name enclosed in double quotes: line 1 column 2 (char 1)",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mJSONDecodeError\u001b[39m                           Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[18]\u001b[39m\u001b[32m, line 3\u001b[39m\n\u001b[32m      1\u001b[39m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mjson\u001b[39;00m\n\u001b[32m      2\u001b[39m test = \u001b[33m\"\u001b[39m\u001b[33m{\u001b[39m\u001b[33m'\u001b[39m\u001b[33mpaper_basic_info\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m{\u001b[39m\u001b[33m'\u001b[39m\u001b[33mtitle\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33mStableAvatar: Infinite-Length Audio-Driven Avatar Video Generation\u001b[39m\u001b[33m'\u001b[39m\u001b[33m, \u001b[39m\u001b[33m'\u001b[39m\u001b[33mauthors\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33mShuyuan Tu；Yueming Pan；Yinming Huang；Xintong Han；Zhen Xing；Qi Dai；Chong Luo；Zuxuan Wu；Yu-Gang Jiang\u001b[39m\u001b[33m'\u001b[39m\u001b[33m, \u001b[39m\u001b[33m'\u001b[39m\u001b[33mkeywords\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33mStableAvatar；audio-driven avatar video；infinite-length generation；diffusion model；audio synchronization\u001b[39m\u001b[33m'\u001b[39m\u001b[33m}, \u001b[39m\u001b[33m'\u001b[39m\u001b[33mresearch_topic\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33mInfinite-length audio-driven avatar video generation using diffusion models\u001b[39m\u001b[33m'\u001b[39m\u001b[33m, \u001b[39m\u001b[33m'\u001b[39m\u001b[33mexisting_problem\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33mCurrent diffusion models for audio-driven avatar video generation struggle to synthesize long videos with natural audio synchronization and identity consistency; they are limited to generating short videos of less than 15 seconds, and when generating longer videos, significant body distortions and appearance inconsistencies occur, particularly in facial 
regions\u001b[39m\u001b[33m'\u001b[39m\u001b[33m, \u001b[39m\u001b[33m'\u001b[39m\u001b[33mresearch_goal\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33mPropose StableAvatar, an end-to-end video fusion transformer that synthesizes infinite-length high-quality videos without post-processing, maintaining ID consistency and audio synchronization\u001b[39m\u001b[33m'\u001b[39m\u001b[33m, \u001b[39m\u001b[33m'\u001b[39m\u001b[33mcore_method\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33mIntroduce a Time-step-aware Audio Adapter to prevent error accumulation; propose an Audio Native Guidance Mechanism to enhance audio synchronization; implement a Dynamic Weighted Sliding-window Strategy to improve video smoothness\u001b[39m\u001b[33m'\u001b[39m\u001b[33m, \u001b[39m\u001b[33m'\u001b[39m\u001b[33mkey_result\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33mExperiments on benchmarks show the effectiveness of StableAvatar both qualitatively and quantitatively in generating long, consistent, and high-quality audio-driven avatar videos\u001b[39m\u001b[33m'\u001b[39m\u001b[33m, \u001b[39m\u001b[33m'\u001b[39m\u001b[33mresearch_significance\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33mEnables the generation of high-fidelity, infinite-length audio-driven avatar videos, which has potential applications in film production and virtual assistants\u001b[39m\u001b[33m'\u001b[39m\u001b[33m, \u001b[39m\u001b[33m'\u001b[39m\u001b[33mpreliminary_judgment\u001b[39m\u001b[33m'\u001b[39m\u001b[33m: \u001b[39m\u001b[33m'\u001b[39m\u001b[33m值得，因研究主题聚焦于音频驱动的无限长度虚拟人视频生成，核心方法创新且实验结果显著，具有实际应用价值\u001b[39m\u001b[33m'\u001b[39m\u001b[33m}\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m3\u001b[39m json.loads(test)\n",
      "\u001b[36mFile \u001b[39m\u001b[32m/opt/conda/lib/python3.11/json/__init__.py:346\u001b[39m, in \u001b[36mloads\u001b[39m\u001b[34m(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001b[39m\n\u001b[32m    341\u001b[39m     s = s.decode(detect_encoding(s), \u001b[33m'\u001b[39m\u001b[33msurrogatepass\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m    343\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m (\u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[32m    344\u001b[39m         parse_int \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m parse_float \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[32m    345\u001b[39m         parse_constant \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_pairs_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m kw):\n\u001b[32m--> \u001b[39m\u001b[32m346\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m _default_decoder.decode(s)\n\u001b[32m    347\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m    348\u001b[39m     \u001b[38;5;28mcls\u001b[39m = JSONDecoder\n",
      "\u001b[36mFile \u001b[39m\u001b[32m/opt/conda/lib/python3.11/json/decoder.py:337\u001b[39m, in \u001b[36mJSONDecoder.decode\u001b[39m\u001b[34m(self, s, _w)\u001b[39m\n\u001b[32m    332\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mdecode\u001b[39m(\u001b[38;5;28mself\u001b[39m, s, _w=WHITESPACE.match):\n\u001b[32m    333\u001b[39m \u001b[38;5;250m    \u001b[39m\u001b[33;03m\"\"\"Return the Python representation of ``s`` (a ``str`` instance\u001b[39;00m\n\u001b[32m    334\u001b[39m \u001b[33;03m    containing a JSON document).\u001b[39;00m\n\u001b[32m    335\u001b[39m \n\u001b[32m    336\u001b[39m \u001b[33;03m    \"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m337\u001b[39m     obj, end = \u001b[38;5;28mself\u001b[39m.raw_decode(s, idx=_w(s, \u001b[32m0\u001b[39m).end())\n\u001b[32m    338\u001b[39m     end = _w(s, end).end()\n\u001b[32m    339\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m end != \u001b[38;5;28mlen\u001b[39m(s):\n",
      "\u001b[36mFile \u001b[39m\u001b[32m/opt/conda/lib/python3.11/json/decoder.py:353\u001b[39m, in \u001b[36mJSONDecoder.raw_decode\u001b[39m\u001b[34m(self, s, idx)\u001b[39m\n\u001b[32m    344\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"Decode a JSON document from ``s`` (a ``str`` beginning with\u001b[39;00m\n\u001b[32m    345\u001b[39m \u001b[33;03ma JSON document) and return a 2-tuple of the Python\u001b[39;00m\n\u001b[32m    346\u001b[39m \u001b[33;03mrepresentation and the index in ``s`` where the document ended.\u001b[39;00m\n\u001b[32m   (...)\u001b[39m\u001b[32m    350\u001b[39m \n\u001b[32m    351\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m    352\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m353\u001b[39m     obj, end = \u001b[38;5;28mself\u001b[39m.scan_once(s, idx)\n\u001b[32m    354\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m err:\n\u001b[32m    355\u001b[39m     \u001b[38;5;28;01mraise\u001b[39;00m JSONDecodeError(\u001b[33m\"\u001b[39m\u001b[33mExpecting value\u001b[39m\u001b[33m\"\u001b[39m, s, err.value) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m\n",
      "\u001b[31mJSONDecodeError\u001b[39m: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)"
     ]
    }
   ],
   "source": [
    "import json\n",
    "test = \"{'paper_basic_info': {'title': 'StableAvatar: Infinite-Length Audio-Driven Avatar Video Generation', 'authors': 'Shuyuan Tu；Yueming Pan；Yinming Huang；Xintong Han；Zhen Xing；Qi Dai；Chong Luo；Zuxuan Wu；Yu-Gang Jiang', 'keywords': 'StableAvatar；audio-driven avatar video；infinite-length generation；diffusion model；audio synchronization'}, 'research_topic': 'Infinite-length audio-driven avatar video generation using diffusion models', 'existing_problem': 'Current diffusion models for audio-driven avatar video generation struggle to synthesize long videos with natural audio synchronization and identity consistency; they are limited to generating short videos of less than 15 seconds, and when generating longer videos, significant body distortions and appearance inconsistencies occur, particularly in facial regions', 'research_goal': 'Propose StableAvatar, an end-to-end video fusion transformer that synthesizes infinite-length high-quality videos without post-processing, maintaining ID consistency and audio synchronization', 'core_method': 'Introduce a Time-step-aware Audio Adapter to prevent error accumulation; propose an Audio Native Guidance Mechanism to enhance audio synchronization; implement a Dynamic Weighted Sliding-window Strategy to improve video smoothness', 'key_result': 'Experiments on benchmarks show the effectiveness of StableAvatar both qualitatively and quantitatively in generating long, consistent, and high-quality audio-driven avatar videos', 'research_significance': 'Enables the generation of high-fidelity, infinite-length audio-driven avatar videos, which has potential applications in film production and virtual assistants', 'preliminary_judgment': '值得，因研究主题聚焦于音频驱动的无限长度虚拟人视频生成，核心方法创新且实验结果显著，具有实际应用价值'}\"\n",
    "json.loads(test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a420a0a7-5ba0-4a3d-a1f3-e17166b960ad",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
