{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "acec01f4",
   "metadata": {},
   "source": [
    "# 基于 LLM 的地缘政治推文市场影响与黑天鹅事件预警系统"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c8cdf86e",
   "metadata": {},
   "source": [
    "## 1. 数据采集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "4b639e29",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from pathlib import Path\n",
    "import tweepy\n",
    "import time\n",
    "import datetime\n",
    "from google import genai\n",
    "from google.genai import types\n",
    "from typing import List, Dict, Any, Optional\n",
    "import json"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "63f826f4",
   "metadata": {},
   "source": [
    "### 1.1 实现核心推文获取函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "082bbb95",
   "metadata": {},
   "outputs": [],
   "source": [
    "def fetch_recent_posts(\n",
    "    username: str,\n",
    "    bearer_token: str,\n",
    "    lookback_hours: int = 24,\n",
    "    include_replies: bool = False,\n",
    "    include_retweets: bool = False,\n",
    "    limit: Optional[int] = 100\n",
    ") -> List[Dict[str, Any]]:\n",
    "    \"\"\"\n",
    "    Fetch a single user's recent tweets from the X (Twitter) API v2.\n",
    "\n",
    "    Args:\n",
    "        username (str): Target user's X handle (without '@').\n",
    "        bearer_token (str): Your X API v2 Bearer Token.\n",
    "        lookback_hours (int, optional): How many hours to look back. Defaults to 24.\n",
    "        include_replies (bool, optional): Whether to include replies. Defaults to False.\n",
    "        include_retweets (bool, optional): Whether to include retweets. Defaults to False.\n",
    "        limit (Optional[int], optional): Maximum number of tweets to return.\n",
    "                                         A single API request is capped at 100. Defaults to 100.\n",
    "\n",
    "    Returns:\n",
    "        List[Dict[str, Any]]: Tweet records with keys 'id', 'text',\n",
    "                              'created_at', 'author_username'. Empty list if the\n",
    "                              user does not exist or posted nothing in the window.\n",
    "    \"\"\"\n",
    "    print(\"--- 初始化 Tweepy 客户端 ---\")\n",
    "    try:\n",
    "        # wait_on_rate_limit=True makes Tweepy sleep through 429s instead of raising.\n",
    "        client = tweepy.Client(bearer_token, wait_on_rate_limit=True)\n",
    "    except Exception as e:\n",
    "        print(f\"错误：无法初始化 Tweepy 客户端: {e}\")\n",
    "        return []\n",
    "\n",
    "    print(f\"--- 正在获取用户 '{username}' 的 ID ---\")\n",
    "    try:\n",
    "        # API v2 addresses timelines by numeric user ID, not by handle.\n",
    "        user_response = client.get_user(username=username)\n",
    "        if user_response.data is None:\n",
    "            print(f\"错误：找不到用户 '{username}'。\")\n",
    "            return []\n",
    "        user_id = user_response.data.id\n",
    "        author_username = user_response.data.username  # canonical casing of the handle\n",
    "        print(f\"成功获取用户 ID: {user_id}\")\n",
    "    except tweepy.errors.TweepyException as e:\n",
    "        print(f\"错误：获取用户ID时发生 API 错误: {e}\")\n",
    "        return []\n",
    "\n",
    "    # Start of the query window (UTC).\n",
    "    start_time = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(hours=lookback_hours)\n",
    "\n",
    "    # Build exclusion rules for the timeline endpoint.\n",
    "    exclude_rules = []\n",
    "    if not include_replies:\n",
    "        exclude_rules.append('replies')\n",
    "    if not include_retweets:\n",
    "        exclude_rules.append('retweets')\n",
    "\n",
    "    print(f\"--- 正在获取用户 {username} 在过去 {lookback_hours} 小时内的推文 ---\")\n",
    "    print(f\"排除规则: {exclude_rules or '无'}\")\n",
    "\n",
    "    formatted_tweets = []\n",
    "\n",
    "    try:\n",
    "        # Paginator transparently follows next_token across result pages.\n",
    "        # NOTE: free-tier rate limits are strict; multi-page pulls hit them quickly.\n",
    "        # The endpoint requires 5 <= max_results <= 100, so clamp the page size;\n",
    "        # passing e.g. max_results=3 would be rejected with a 400 error.\n",
    "        page_size = max(5, min(limit, 100)) if limit else 100\n",
    "        paginator = tweepy.Paginator(\n",
    "            client.get_users_tweets,\n",
    "            id=user_id,\n",
    "            start_time=start_time,\n",
    "            exclude=exclude_rules or None,\n",
    "            tweet_fields=[\"id\", \"text\", \"created_at\"],\n",
    "            max_results=page_size\n",
    "        )\n",
    "\n",
    "        # Flatten and format the paginated responses.\n",
    "        for response in paginator:\n",
    "            if response.data:\n",
    "                for tweet in response.data:\n",
    "                    formatted_tweets.append({\n",
    "                        \"id\": str(tweet.id),\n",
    "                        \"text\": tweet.text,\n",
    "                        \"created_at\": tweet.created_at.isoformat(),\n",
    "                        \"author_username\": author_username\n",
    "                    })\n",
    "                    # Stop once the caller-requested limit is reached.\n",
    "                    if limit and len(formatted_tweets) >= limit:\n",
    "                        break\n",
    "            if limit and len(formatted_tweets) >= limit:\n",
    "                break\n",
    "\n",
    "    except tweepy.errors.TweepyException as e:\n",
    "        print(f\"错误：获取推文时发生 API 错误: {e}\")\n",
    "        # Return whatever was collected before the error.\n",
    "        return formatted_tweets\n",
    "    except Exception as e:\n",
    "        print(f\"发生未知错误: {e}\")\n",
    "        return formatted_tweets\n",
    "\n",
    "    print(f\"--- 成功获取 {len(formatted_tweets)} 条推文 ---\")\n",
    "    return formatted_tweets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "0a15aa2f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def fetch_recent_posts2(\n",
    "    username: str,\n",
    "    user_id: int,\n",
    "    bearer_token: str,\n",
    "    lookback_hours: int = 24,\n",
    "    include_replies: bool = False,\n",
    "    include_retweets: bool = False,\n",
    "    limit: Optional[int] = 100\n",
    ") -> List[Dict[str, Any]]:\n",
    "    \"\"\"\n",
    "    Fetch a single user's recent tweets from the X (Twitter) API v2, using a\n",
    "    known numeric user ID to skip the rate-limited get_user lookup that\n",
    "    fetch_recent_posts performs.\n",
    "\n",
    "    Args:\n",
    "        username (str): Target user's X handle (without '@'); used only for\n",
    "                        logging and the 'author_username' output field.\n",
    "        user_id (int): The user's numeric X API v2 ID.\n",
    "        bearer_token (str): Your X API v2 Bearer Token.\n",
    "        lookback_hours (int, optional): How many hours to look back. Defaults to 24.\n",
    "        include_replies (bool, optional): Whether to include replies. Defaults to False.\n",
    "        include_retweets (bool, optional): Whether to include retweets. Defaults to False.\n",
    "        limit (Optional[int], optional): Maximum number of tweets to return.\n",
    "                                         A single API request is capped at 100. Defaults to 100.\n",
    "\n",
    "    Returns:\n",
    "        List[Dict[str, Any]]: Tweet records with keys 'id', 'text',\n",
    "                              'created_at', 'author_username'. Empty list on failure.\n",
    "    \"\"\"\n",
    "    print(\"--- 初始化 Tweepy 客户端 ---\")\n",
    "    try:\n",
    "        # wait_on_rate_limit=True makes Tweepy sleep through 429s instead of raising.\n",
    "        client = tweepy.Client(bearer_token, wait_on_rate_limit=True)\n",
    "    except Exception as e:\n",
    "        print(f\"错误：无法初始化 Tweepy 客户端: {e}\")\n",
    "        return []\n",
    "\n",
    "    # Start of the query window (UTC).\n",
    "    start_time = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(hours=lookback_hours)\n",
    "\n",
    "    # Build exclusion rules for the timeline endpoint.\n",
    "    exclude_rules = []\n",
    "    if not include_replies:\n",
    "        exclude_rules.append('replies')\n",
    "    if not include_retweets:\n",
    "        exclude_rules.append('retweets')\n",
    "\n",
    "    print(f\"--- 正在获取用户 {username} 在过去 {lookback_hours} 小时内的推文 ---\")\n",
    "    print(f\"排除规则: {exclude_rules or '无'}\")\n",
    "\n",
    "    formatted_tweets = []\n",
    "\n",
    "    try:\n",
    "        # Paginator transparently follows next_token across result pages.\n",
    "        # NOTE: free-tier rate limits are strict; multi-page pulls hit them quickly.\n",
    "        # The endpoint requires 5 <= max_results <= 100, so clamp the page size;\n",
    "        # passing e.g. max_results=3 would be rejected with a 400 error.\n",
    "        page_size = max(5, min(limit, 100)) if limit else 100\n",
    "        paginator = tweepy.Paginator(\n",
    "            client.get_users_tweets,\n",
    "            id=user_id,\n",
    "            start_time=start_time,\n",
    "            exclude=exclude_rules or None,\n",
    "            tweet_fields=[\"id\", \"text\", \"created_at\"],\n",
    "            max_results=page_size\n",
    "        )\n",
    "\n",
    "        # Flatten and format the paginated responses.\n",
    "        for response in paginator:\n",
    "            if response.data:\n",
    "                for tweet in response.data:\n",
    "                    formatted_tweets.append({\n",
    "                        \"id\": str(tweet.id),\n",
    "                        \"text\": tweet.text,\n",
    "                        \"created_at\": tweet.created_at.isoformat(),\n",
    "                        \"author_username\": username\n",
    "                    })\n",
    "                    # Stop once the caller-requested limit is reached.\n",
    "                    if limit and len(formatted_tweets) >= limit:\n",
    "                        break\n",
    "            if limit and len(formatted_tweets) >= limit:\n",
    "                break\n",
    "\n",
    "    except tweepy.errors.TweepyException as e:\n",
    "        print(f\"错误：获取推文时发生 API 错误: {e}\")\n",
    "        # Return whatever was collected before the error.\n",
    "        return formatted_tweets\n",
    "    except Exception as e:\n",
    "        print(f\"发生未知错误: {e}\")\n",
    "        return formatted_tweets\n",
    "\n",
    "    print(f\"--- 成功获取 {len(formatted_tweets)} 条推文 ---\")\n",
    "    return formatted_tweets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7bfd817e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Never hardcode credentials: read the token from an environment variable\n",
    "# (e.g. `export X_BEARER_TOKEN=...` before launching Jupyter). The placeholder\n",
    "# fallback keeps the cell runnable but will be rejected by the API.\n",
    "my_bearer_token = os.environ.get(\"X_BEARER_TOKEN\", \"YOUR_BEARER_TOKEN\")\n",
    "target_username = \"POTUS\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "06fcdb1f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- 初始化 Tweepy 客户端 ---\n",
      "--- 正在获取用户 'POTUS' 的 ID ---\n",
      "成功获取用户 ID: 1879650942410481666\n",
      "--- 正在获取用户 POTUS 在过去 12 小时内的推文 ---\n",
      "排除规则: 无\n",
      "错误：获取推文时发生 API 错误: 429 Too Many Requests\n",
      "Too Many Requests\n"
     ]
    }
   ],
   "source": [
    "# Single-account smoke test; free-tier rate limits may still yield a 429 here.\n",
    "recent_tweets = fetch_recent_posts(\n",
    "    username=target_username,\n",
    "    bearer_token=my_bearer_token,\n",
    "    lookback_hours=12,\n",
    "    include_replies=True, \n",
    "    include_retweets=True,\n",
    "    limit=5\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1c6961c6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- 初始化 Tweepy 客户端 ---\n",
      "--- 正在获取用户 POTUS 在过去 12 小时内的推文 ---\n",
      "排除规则: 无\n",
      "--- 成功获取 5 条推文 ---\n"
     ]
    }
   ],
   "source": [
    "# Fallback kept for reference: fetch_recent_posts2 takes a cached numeric user ID,\n",
    "# skipping the rate-limited get_user lookup (useful when get_user itself hits 429).\n",
    "# recent_tweets = fetch_recent_posts2(\n",
    "#     username=target_username,\n",
    "#     user_id = 1879650942410481666, \n",
    "#     bearer_token=my_bearer_token,\n",
    "#     lookback_hours=12,\n",
    "#     include_replies=True, \n",
    "#     include_retweets=True,\n",
    "#     limit=5\n",
    "# )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "452b9317",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- 来自 POTUS 的最新推文: ---\n",
      "  1. ID: 1956497169109614663\n",
      "     发布时间: 2025-08-15T23:24:09+00:00\n",
      "     内容: RT @WhiteHouse: “We made some great progress today… We had an extremely productive meeting and many points were agreed to.” - President Don…\n",
      "\n",
      "  2. ID: 1956489762052727014\n",
      "     发布时间: 2025-08-15T22:54:43+00:00\n",
      "     内容: RT @WhiteHouse: President Trump Participates in a Press Conference with the President of the Russian Federation https://t.co/D07iIhS8lh\n",
      "\n",
      "  3. ID: 1956461315838394436\n",
      "     发布时间: 2025-08-15T21:01:41+00:00\n",
      "     内容: RT @WhiteHouse: 🇺🇸 TONIGHT starting at 6PM EST: President Donald J. Trump sits down with @BretBaier and @SeanHannity on @FoxNews for two ex…\n",
      "\n",
      "  4. ID: 1956446565263876170\n",
      "     发布时间: 2025-08-15T20:03:04+00:00\n",
      "     内容: RT @WhiteHouse: Pursuing Peace. 🇺🇸🇷🇺 https://t.co/d9VqKzpF6g\n",
      "\n",
      "  5. ID: 1956431407728800010\n",
      "     发布时间: 2025-08-15T19:02:50+00:00\n",
      "     内容: RT @WhiteHouse: President Trump Greets the President of the Russian Federation https://t.co/xMG6wxYDEy\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Preview the tweets fetched for the single target account.\n",
    "if recent_tweets:\n",
    "    print(f\"--- 来自 {target_username} 的最新推文: ---\")\n",
    "    for i, tweet in enumerate(recent_tweets):\n",
    "        print(f\"  {i+1}. ID: {tweet['id']}\")\n",
    "        print(f\"     作者: {tweet['author_username']}\")\n",
    "        print(f\"     发布时间: {tweet['created_at']}\")\n",
    "        print(f\"     内容: {tweet['text']}\\n\")\n",
    "else:\n",
    "    print(f\"\\n未能从 {target_username} 获取任何推文。\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8a167c08",
   "metadata": {},
   "source": [
    "### 1.2：实现多用户推文的批量获取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "1b91100c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def fetch_all_leaders_posts(\n",
    "    leader_usernames: List[str],\n",
    "    bearer_token: str,\n",
    "    lookback_hours: int = 24,\n",
    "    include_replies: bool = False,\n",
    "    include_retweets: bool = False,\n",
    "    limit_per_user: Optional[int] = 100\n",
    ") -> List[Dict[str, Any]]:\n",
    "    \"\"\"\n",
    "    Iterate over a list of usernames and collect everyone's tweets by\n",
    "    delegating to fetch_recent_posts for each account.\n",
    "\n",
    "    Args:\n",
    "        leader_usernames (List[str]): X usernames to fetch tweets for.\n",
    "        bearer_token (str): Your X API v2 Bearer Token.\n",
    "        lookback_hours (int, optional): How many hours to look back. Defaults to 24.\n",
    "        include_replies (bool, optional): Whether to include replies. Defaults to False.\n",
    "        include_retweets (bool, optional): Whether to include retweets. Defaults to False.\n",
    "        limit_per_user (Optional[int], optional): Max tweets per user. Defaults to 100.\n",
    "\n",
    "    Returns:\n",
    "        List[Dict[str, Any]]: A single list merging all target users' tweets.\n",
    "    \"\"\"\n",
    "    all_tweets = []\n",
    "    print(f\"\\n=== 开始为 {len(leader_usernames)} 位领导人批量获取推文 ===\")\n",
    "    \n",
    "    for i, username in enumerate(leader_usernames):\n",
    "        print(f\"\\n[{i+1}/{len(leader_usernames)}] ---> 正在处理用户: {username}\")\n",
    "        \n",
    "        # Delegate to the single-user fetcher defined above.\n",
    "        tweets = fetch_recent_posts(\n",
    "            username=username,\n",
    "            bearer_token=bearer_token,\n",
    "            lookback_hours=lookback_hours,\n",
    "            include_replies=include_replies,\n",
    "            include_retweets=include_retweets,\n",
    "            limit=limit_per_user\n",
    "        )\n",
    "        \n",
    "        if tweets:\n",
    "            all_tweets.extend(tweets)\n",
    "            \n",
    "        # Short pause between requests to avoid hammering the rate limit.\n",
    "        if i < len(leader_usernames) - 1:\n",
    "             print(\"---> 处理完毕，暂停 1 秒...\")\n",
    "             time.sleep(1)\n",
    "\n",
    "    print(f\"\\n=== 批量获取完成。总共收集到 {len(all_tweets)} 条推文 ===\")\n",
    "    return all_tweets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "b9ce7253",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Accounts to monitor: foreign-ministry and head-of-government handles.\n",
    "leaders_to_monitor = [\"MFA_China\", \"POTUS\", \"10DowningStreet\", \"EmmanuelMacron\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "b02e8930",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== 开始为 4 位领导人批量获取推文 ===\n",
      "\n",
      "[1/4] ---> 正在处理用户: MFA_China\n",
      "--- 初始化 Tweepy 客户端 ---\n",
      "--- 正在获取用户 'MFA_China' 的 ID ---\n",
      "成功获取用户 ID: 1183694592433983489\n",
      "--- 正在获取用户 MFA_China 在过去 12 小时内的推文 ---\n",
      "排除规则: 无\n",
      "--- 成功获取 5 条推文 ---\n",
      "---> 处理完毕，暂停 1 秒...\n",
      "\n",
      "[2/4] ---> 正在处理用户: POTUS\n",
      "--- 初始化 Tweepy 客户端 ---\n",
      "--- 正在获取用户 'POTUS' 的 ID ---\n",
      "成功获取用户 ID: 1879650942410481666\n",
      "--- 正在获取用户 POTUS 在过去 12 小时内的推文 ---\n",
      "排除规则: 无\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Rate limit exceeded. Sleeping for 903 seconds.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "发生未知错误: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n",
      "---> 处理完毕，暂停 1 秒...\n",
      "\n",
      "[3/4] ---> 正在处理用户: 10DowningStreet\n",
      "--- 初始化 Tweepy 客户端 ---\n",
      "--- 正在获取用户 '10DowningStreet' 的 ID ---\n",
      "成功获取用户 ID: 14224719\n",
      "--- 正在获取用户 10DowningStreet 在过去 12 小时内的推文 ---\n",
      "排除规则: 无\n",
      "--- 成功获取 1 条推文 ---\n",
      "---> 处理完毕，暂停 1 秒...\n",
      "\n",
      "[4/4] ---> 正在处理用户: EmmanuelMacron\n",
      "--- 初始化 Tweepy 客户端 ---\n",
      "--- 正在获取用户 'EmmanuelMacron' 的 ID ---\n",
      "成功获取用户 ID: 1976143068\n",
      "--- 正在获取用户 EmmanuelMacron 在过去 12 小时内的推文 ---\n",
      "排除规则: 无\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Rate limit exceeded. Sleeping for 902 seconds.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "发生未知错误: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n",
      "\n",
      "=== 批量获取完成。总共收集到 6 条推文 ===\n"
     ]
    }
   ],
   "source": [
    "# Batch-fetch tweets for all monitored accounts.\n",
    "all_tweets_collected = fetch_all_leaders_posts(\n",
    "    leader_usernames=leaders_to_monitor,\n",
    "    bearer_token=my_bearer_token,\n",
    "    lookback_hours=12,\n",
    "    include_replies=True,\n",
    "    include_retweets=True,\n",
    "    limit_per_user=5\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "ce76096f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- 所有监控账号的推文整合结果: ---\n",
      "  1. 用户: MFA_China\n",
      "     ID: 1957459470868770856\n",
      "     发布时间: 2025-08-18T15:08:00+00:00\n",
      "     内容: RT @SpoxCHN_MaoNing: China supports all efforts conducive to a peaceful way out of the Ukraine crisi...\n",
      "\n",
      "  2. 用户: MFA_China\n",
      "     ID: 1957451503381438723\n",
      "     发布时间: 2025-08-18T14:36:20+00:00\n",
      "     内容: RT @ChineseEmb_Uga: Today, H.E. President Museveni witnessed the Commissioning of Wagagai Gold Mine ...\n",
      "\n",
      "  3. 用户: MFA_China\n",
      "     ID: 1957451385483751858\n",
      "     发布时间: 2025-08-18T14:35:52+00:00\n",
      "     内容: RT @EmbChinaPa: Nos complace visitar el hermoso Distrito de Montijo de la Provincia de Veraguas, don...\n",
      "\n",
      "  4. 用户: MFA_China\n",
      "     ID: 1957405647349408140\n",
      "     发布时间: 2025-08-18T11:34:07+00:00\n",
      "     内容: RT @SpoxCHN_MaoNing: In Jinan, Shandong, Eastern China, a smart U-turn system lets multiple cars rev...\n",
      "\n",
      "  5. 用户: MFA_China\n",
      "     ID: 1957392234074280226\n",
      "     发布时间: 2025-08-18T10:40:49+00:00\n",
      "     内容: RT @AmbXieFeng: With passion, open mind and kindness, the young ambassadors have shown our two count...\n",
      "\n",
      "  6. 用户: 10DowningStreet\n",
      "     ID: 1957412655729131630\n",
      "     发布时间: 2025-08-18T12:01:58+00:00\n",
      "     内容: RT @Keir_Starmer: I’m on my way to Washington D.C. to meet @POTUS, @ZelenskyyUa and other leaders.\n",
      "\n",
      "...\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Print the consolidated results from all monitored accounts.\n",
    "if all_tweets_collected:\n",
    "    print(\"--- 所有监控账号的推文整合结果: ---\")\n",
    "    for i, tweet in enumerate(all_tweets_collected):\n",
    "        print(f\"  {i+1}. 用户: {tweet['author_username']}\")\n",
    "        print(f\"     ID: {tweet['id']}\")\n",
    "        print(f\"     发布时间: {tweet['created_at']}\")\n",
    "        # Preview only the first 100 characters and flatten real newlines.\n",
    "        # (The original replaced the literal '/n' — a typo matching nothing.)\n",
    "        # Built outside the f-string because backslashes inside f-string\n",
    "        # expressions are a SyntaxError before Python 3.12.\n",
    "        preview = tweet['text'][:100].replace('\\n', ' ')\n",
    "        print(f\"     内容: {preview}...\\n\")\n",
    "else:\n",
    "    print(\"未能从任何监控的账号获取到推文。\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c4548890",
   "metadata": {},
   "source": [
    "### 1.3：将原始数据持久化存储"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "d4029fba",
   "metadata": {},
   "outputs": [],
   "source": [
    "def save_tweets_to_file(\n",
    "    tweets_data: List[Dict[str, Any]],\n",
    "    output_dir: str = '../data'\n",
    ") -> str:\n",
    "    \"\"\"\n",
    "    Save a list of tweet records to a timestamped JSON file.\n",
    "\n",
    "    Args:\n",
    "        tweets_data (List[Dict[str, Any]]): Tweet records to persist.\n",
    "        output_dir (str, optional): Directory for the output file.\n",
    "                                    Defaults to '../data'; created if missing.\n",
    "\n",
    "    Returns:\n",
    "        str: Path of the saved file, or an empty string on failure.\n",
    "    \"\"\"\n",
    "    if not tweets_data:\n",
    "        print(\"--- 数据为空，无需保存文件。 ---\")\n",
    "        return \"\"\n",
    "\n",
    "    try:\n",
    "        # Step 1: make sure the output directory exists.\n",
    "        os.makedirs(output_dir, exist_ok=True)\n",
    "\n",
    "        # Step 2: timestamped filename keeps successive runs from overwriting\n",
    "        # each other.\n",
    "        timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n",
    "        filename = f\"tweets_{timestamp}.json\"\n",
    "        \n",
    "        # Step 3: build the full output path (the redundant Path() wrapper\n",
    "        # around output_dir was dropped; os.path.join accepts str directly).\n",
    "        file_path = os.path.join(output_dir, filename)\n",
    "\n",
    "        # Step 4: write the records as pretty-printed JSON.\n",
    "        print(f\"\\n--- 正在将 {len(tweets_data)} 条推文保存到文件... ---\")\n",
    "        with open(file_path, 'w', encoding='utf-8') as f:\n",
    "            # ensure_ascii=False keeps CJK text and emoji readable in the file;\n",
    "            # indent=4 formats the JSON for human inspection.\n",
    "            json.dump(tweets_data, f, ensure_ascii=False, indent=4)\n",
    "        \n",
    "        print(f\"保存成功！文件路径: {file_path}\")\n",
    "        return file_path\n",
    "\n",
    "    except Exception as e:\n",
    "        print(f\"错误：保存文件失败。原因: {e}\")\n",
    "        return \"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "715a2fea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "--- 正在将 6 条推文保存到文件... ---\n",
      "保存成功！文件路径: ..\\data\\tweets_2025-08-18_19-15-09.json\n"
     ]
    }
   ],
   "source": [
    "saved_filepath = save_tweets_to_file(all_tweets_collected)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f83b7b07",
   "metadata": {},
   "source": [
    "## 2. AI 分析"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ef64b99a",
   "metadata": {},
   "source": [
    "### 2.1：设计 LLM 的指令"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "db865d96",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_analysis_prompt(\n",
    "    markets_list: List[str],\n",
    "    tweets_filepath: str\n",
    ") -> str:\n",
    "    \"\"\"\n",
    "    Build a complete, highly structured prompt from the given markets and a\n",
    "    tweets data file. This version hard-codes the detailed output JSON schema\n",
    "    inside the function.\n",
    "\n",
    "    Args:\n",
    "        markets_list (List[str]): Financial markets to analyze.\n",
    "        tweets_filepath (str): Path to the JSON file holding the tweets.\n",
    "\n",
    "    Returns:\n",
    "        str: A fully populated prompt ready to send to the LLM, or an empty\n",
    "             string if the file could not be read.\n",
    "    \"\"\"\n",
    "    # Step 1: load the tweet data from the file path.\n",
    "    try:\n",
    "        with open(tweets_filepath, 'r', encoding='utf-8') as f:\n",
    "            tweets_data = json.load(f)\n",
    "        tweets_json_str = json.dumps(tweets_data, indent=2, ensure_ascii=False)\n",
    "    except Exception as e:\n",
    "        print(f\"读取或处理推文文件时出错: {e}\")\n",
    "        return \"\"\n",
    "\n",
    "    # Step 2: format the market list as a quoted, comma-separated string.\n",
    "    markets_list_str = \", \".join(f'\"{m}\"' for m in markets_list)\n",
    "\n",
    "    # Step 3: define the final, detailed output format.\n",
    "    output_format_description = \"\"\"\n",
    "Your output must be a single JSON object with one root key: \"analysis_results\".\n",
    "This key holds a list of JSON objects, where each object represents the analysis for a single tweet.\n",
    "\n",
    "The schema for EACH object in the \"analysis_results\" list is as follows:\n",
    "\n",
    "```json\n",
    "{\n",
    "  \"tweet_id\": \"string\",\n",
    "  \"author_username\": \"string\",\n",
    "  \"tweet_text\": \"string\",\n",
    "  \"market_impact\": [\n",
    "    {\n",
    "      \"market\": \"string\",\n",
    "      \"sentiment\": \"string\",\n",
    "      \"reasoning\": \"string\"\n",
    "    }\n",
    "  ],\n",
    "  \"black_swan_alert\": {\n",
    "    \"is_alert\": \"boolean\",\n",
    "    \"category\": \"string_or_null\",\n",
    "    \"severity\": \"string_or_null\",\n",
    "    \"alert_reason\": \"string_or_null\"\n",
    "  }\n",
    "}\n",
    "```\n",
    "\n",
    "### Field Explanations and Constraints:\n",
    "\n",
    "  - `tweet_id`, `author_username`, `tweet_text`: Must be copied directly from the input data for the corresponding tweet.\n",
    "  - `market_impact`: A list containing an analysis object for EACH market specified in the instructions.\n",
    "      - `sentiment`: Must be one of the following strings: \"利好\" (Bullish), \"利空\" (Bearish), or \"中性\" (Neutral).\n",
    "      - `reasoning`: A concise explanation for the sentiment, **written in Simplified Chinese**.\n",
    "  - `black_swan_alert`:\n",
    "      - `is_alert`: `true` if a signal is detected, otherwise `false`.\n",
    "\n",
    "      - `category`: If `is_alert` is `true`, must be one of: \"Major Military Conflict\", \"Nuclear Proliferation/Threat\", \"Sovereign Debt Default\", \"Major Terrorist Attack\", \"Public Health Crisis\", \"Critical Infrastructure Disruption\". If `is_alert` is `false`, this field must be `null`.\n",
    "\n",
    "      - `severity`: If `is_alert` is `true`, must be one of: \"Low\", \"Medium\", \"High\". If `is_alert` is `false`, this must be `null`.\n",
    "\n",
    "      - `alert_reason`: If `is_alert` is `true`, a concise explanation of the threat, **written in Simplified Chinese**. If `is_alert` is `false`, this must be `null`.\n",
    "\"\"\"\n",
    "\n",
    "    # Step 4: assemble the final prompt.\n",
    "    prompt = f\"\"\"\n",
    "\n",
    "### ROLE\n",
    "\n",
    "You are a top-tier geopolitical and macroeconomic strategist. Your expertise lies in analyzing unstructured data like social media posts from world leaders to forecast market trends and identify nascent global risks.\n",
    "\n",
    "### TASK\n",
    "\n",
    "For each tweet provided, you will perform two critical assessments:\n",
    "\n",
    "1.  **Market Impact Analysis:** Evaluate the tweet's potential impact on each of the specified financial markets.\n",
    "2.  **Black Swan Alert:** Identify if the tweet signals a potential \"Black Swan\" event, defined as a rare, high-impact, hard-to-predict event.\n",
    "\n",
    "### INSTRUCTIONS\n",
    "\n",
    "1.  Analyze the impact on the following markets: {markets_list_str}.\n",
    "2.  Your final output MUST be a single, valid JSON object, with no additional text or markdown formatting.\n",
    "3.  Strictly adhere to the JSON schema and field constraints provided below.\n",
    "4.  **CRITICAL LANGUAGE REQUIREMENT:** The JSON keys must be in English. However, all analytical text values (`sentiment`, `reasoning`, `alert_reason`) MUST be written in **Simplified Chinese (中文)**.\n",
    "\n",
    "### OUTPUT JSON FORMAT\n",
    "\n",
    "{output_format_description}\n",
    "\n",
    "### --- TWEETS DATA TO ANALYZE ---\n",
    "\n",
    "{tweets_json_str}\n",
    "\"\"\"\n",
    "\n",
    "    return prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "41c9ff66",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Markets whose reaction we ask the LLM to assess.\n",
    "markets_to_analyze = [\"US Equities (S&P 500)\", \"Crude Oil (WTI)\", \"Gold\"]\n",
    "\n",
    "# Build the complete prompt from the saved tweets file.\n",
    "full_prompt = generate_analysis_prompt(\n",
    "    markets_list=markets_to_analyze,\n",
    "    tweets_filepath=saved_filepath\n",
    ")\n",
    "\n",
    "if full_prompt:\n",
    "    print(\"--- SUCCESSFULLY GENERATED PROMPT ---\")\n",
    "    print(full_prompt)\n",
    "else:\n",
    "    print(\"--- FAILED TO GENERATE PROMPT ---\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e188a3e2",
   "metadata": {},
   "source": [
    "### 2.2：实现核心分析函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "70ce56c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "def analyze_tweets_with_gemini(\n",
    "    tweets_filepath: str,\n",
    "    markets_list: List[str],\n",
    "    google_api_key: str,\n",
    "    model_name: str = \"gemini-2.5-flash\"\n",
    ") -> Optional[str]:\n",
    "    \"\"\"\n",
    "    Read the tweets file, build the prompt, call the Gemini API for analysis,\n",
    "    and return the result. (This version uses the genai.Client SDK style.)\n",
    "\n",
    "    Args:\n",
    "        tweets_filepath (str): Path to the JSON file holding the tweets.\n",
    "        markets_list (List[str]): Financial markets to analyze.\n",
    "        google_api_key (str): Your Google AI Studio API key.\n",
    "        model_name (str, optional): Gemini model to use.\n",
    "                                    Defaults to \"gemini-2.5-flash\".\n",
    "\n",
    "    Returns:\n",
    "        Optional[str]: The JSON string returned by the Gemini API in the\n",
    "                       expected format, or None if any step fails.\n",
    "    \"\"\"\n",
    "    print(\"--- 步骤 1: 生成分析用的 Prompt ---\")\n",
    "    full_prompt = generate_analysis_prompt(\n",
    "        markets_list=markets_list,\n",
    "        tweets_filepath=tweets_filepath\n",
    "    )\n",
    "\n",
    "    if not full_prompt:\n",
    "        print(\"错误: Prompt 生成失败，分析中止。\")\n",
    "        return None\n",
    "    \n",
    "    print(\"Prompt 生成成功！\")\n",
    "    \n",
    "    try:\n",
    "        print(f\"--- 步骤 2: 配置并调用 Google Gemini API (模型: {model_name}) ---\")\n",
    "        \n",
    "        # Step 2.1: initialize the client.\n",
    "        client = genai.Client(api_key=google_api_key)\n",
    "        \n",
    "        # Step 2.2: generation config forcing a JSON response body.\n",
    "        generation_config = types.GenerateContentConfig(\n",
    "            response_mime_type=\"application/json\"\n",
    "        )\n",
    "\n",
    "        # Step 2.3: call the API via client.models.generate_content.\n",
    "        response = client.models.generate_content(\n",
    "            model=f\"models/{model_name}\", # model names need the 'models/' prefix\n",
    "            contents=full_prompt,\n",
    "            config=generation_config\n",
    "        )\n",
    "\n",
    "        print(\"API 调用成功，已收到响应！\")\n",
    "        \n",
    "        # An empty response usually indicates a safety-policy block.\n",
    "        if not response.text:\n",
    "            print(\"错误: API 返回了空的响应。可能的原因是内容安全策略拦截。\")\n",
    "            print(\"Prompt Feedback:\", response.prompt_feedback)\n",
    "            return None\n",
    "            \n",
    "        # Validate the payload parses as JSON before returning it.\n",
    "        json.loads(response.text)\n",
    "        print(\"响应内容是有效的JSON格式。\")\n",
    "        \n",
    "        return response.text\n",
    "\n",
    "    except Exception as e:\n",
    "        print(f\"在与 Gemini API 交互时发生严重错误: {e}\")\n",
    "        return None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "78817ae4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- 步骤 1: 生成分析用的 Prompt ---\n",
      "Prompt 生成成功！\n",
      "--- 步骤 2: 配置并调用 Google Gemini API (模型: gemini-2.5-flash) ---\n",
      "API 调用成功，已收到响应！\n",
      "响应内容是有效的JSON格式。\n",
      "\n",
      "\n",
      "--- 最终分析结果 (JSON) ---\n",
      "{\n",
      "  \"analysis_results\": [\n",
      "    {\n",
      "      \"tweet_id\": \"182510101010101010\",\n",
      "      \"author_username\": \"USTreasury\",\n",
      "      \"tweet_text\": \"BREAKING: The latest US jobs report far exceeds expectations, with unemployment dropping to a historic low. This is a clear sign of a robust and resilient American economy. Confidence is high.\",\n",
      "      \"market_impact\": [\n",
      "        {\n",
      "          \"market\": \"US Equities (S&P 500)\",\n",
      "          \"sentiment\": \"利好\",\n",
      "          \"reasoning\": \"强劲的就业报告和历史性低失业率表明美国经济强劲，提振投资者信心，利好股市。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Euro/USD Exchange Rate\",\n",
      "          \"sentiment\": \"利空\",\n",
      "          \"reasoning\": \"积极的美国经济数据可能导致美联储采取更鹰派的货币政策立场，从而支撑美元走强，导致欧元兑美元汇率下跌。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Gold\",\n",
      "          \"sentiment\": \"利空\",\n",
      "          \"reasoning\": \"经济强劲和美元走强通常会降低对避险资产黄金的需求，因为投资者更倾向于风险资产。\"\n",
      "        }\n",
      "      ],\n",
      "      \"black_swan_alert\": {\n",
      "        \"is_alert\": false,\n",
      "        \"category\": null,\n",
      "        \"severity\": null,\n",
      "        \"alert_reason\": null\n",
      "      }\n",
      "    },\n",
      "    {\n",
      "      \"tweet_id\": \"182510202020202020\",\n",
      "      \"author_username\": \"ECB_President\",\n",
      "      \"tweet_text\": \"Inflationary pressures in the Eurozone remain a primary concern. We must stay the course with tight monetary policy, which may unfortunately dampen short-term growth prospects for the bloc.\",\n",
      "      \"market_impact\": [\n",
      "        {\n",
      "          \"market\": \"US Equities (S&P 500)\",\n",
      "          \"sentiment\": \"中性\",\n",
      "          \"reasoning\": \"该推文主要关注欧元区内部问题，对美国股市的直接影响有限，但可能通过全球增长放缓间接产生轻微负面影响。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Euro/USD Exchange Rate\",\n",
      "          \"sentiment\": \"利空\",\n",
      "          \"reasoning\": \"欧洲央行持续紧缩货币政策以应对通胀，可能抑制经济增长，从而对欧元构成压力，导致欧元兑美元汇率下跌。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Gold\",\n",
      "          \"sentiment\": \"利好\",\n",
      "          \"reasoning\": \"欧元区经济增长放缓的担忧以及持续的通胀压力可能增加对避险资产黄金的需求。\"\n",
      "        }\n",
      "      ],\n",
      "      \"black_swan_alert\": {\n",
      "        \"is_alert\": false,\n",
      "        \"category\": null,\n",
      "        \"severity\": null,\n",
      "        \"alert_reason\": null\n",
      "      }\n",
      "    },\n",
      "    {\n",
      "      \"tweet_id\": \"182510303030303030\",\n",
      "      \"author_username\": \"UN_Spokesperson\",\n",
      "      \"tweet_text\": \"International diplomatic talks regarding the East Sea dispute have broken down without resolution. Tensions in the region are escalating, and we urge all parties to exercise maximum restraint.\",\n",
      "      \"market_impact\": [\n",
      "        {\n",
      "          \"market\": \"US Equities (S&P 500)\",\n",
      "          \"sentiment\": \"利空\",\n",
      "          \"reasoning\": \"地区冲突加剧可能扰乱全球供应链、能源市场，并增加地缘政治不确定性，从而对全球股市（包括标普500）造成负面影响。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Euro/USD Exchange Rate\",\n",
      "          \"sentiment\": \"利空\",\n",
      "          \"reasoning\": \"国际地缘政治紧张局势通常会促使投资者转向美元这一传统避险货币，从而导致美元走强，欧元兑美元汇率下跌。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Gold\",\n",
      "          \"sentiment\": \"利好\",\n",
      "          \"reasoning\": \"地区冲突升级的威胁会显著增加地缘政治风险，促使投资者寻求避险资产，从而推高黄金价格。\"\n",
      "        }\n",
      "      ],\n",
      "      \"black_swan_alert\": {\n",
      "        \"is_alert\": true,\n",
      "        \"category\": \"Major Military Conflict\",\n",
      "        \"severity\": \"High\",\n",
      "        \"alert_reason\": \"国际外交谈判破裂，地区紧张局势升级，暗示东海地区（通常指南海或东海）可能爆发大规模军事冲突，对全球稳定构成严重威胁。\"\n",
      "      }\n",
      "    },\n",
      "    {\n",
      "      \"tweet_id\": \"182510404040404040\",\n",
      "      \"author_username\": \"President_FR\",\n",
      "      \"tweet_text\": \"It was a pleasure to host the annual G7 summit on climate change today. International cooperation is key to securing a sustainable future for our planet.\",\n",
      "      \"market_impact\": [\n",
      "        {\n",
      "          \"market\": \"US Equities (S&P 500)\",\n",
      "          \"sentiment\": \"中性\",\n",
      "          \"reasoning\": \"G7气候峰会是例行外交事件，其成果通常是长期性的，对短期市场没有直接重大影响。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Euro/USD Exchange Rate\",\n",
      "          \"sentiment\": \"中性\",\n",
      "          \"reasoning\": \"气候峰会和国际合作的声明是宏观议题，不会对欧元/美元汇率产生直接或可预测的波动。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Gold\",\n",
      "          \"sentiment\": \"中性\",\n",
      "          \"reasoning\": \"气候峰会不涉及短期经济或地缘政治风险，因此对作为避险资产的黄金需求影响不大。\"\n",
      "        }\n",
      "      ],\n",
      "      \"black_swan_alert\": {\n",
      "        \"is_alert\": false,\n",
      "        \"category\": null,\n",
      "        \"severity\": null,\n",
      "        \"alert_reason\": null\n",
      "      }\n",
      "    },\n",
      "    {\n",
      "      \"tweet_id\": \"182510505050505050\",\n",
      "      \"author_username\": \"FinanceMinister_JP\",\n",
      "      \"tweet_text\": \"Japan has successfully negotiated a major bilateral trade deal with the United States, focused on reducing tariffs on technology and automotive components. This will reshape trade flows in the Pacific.\",\n",
      "      \"market_impact\": [\n",
      "        {\n",
      "          \"market\": \"US Equities (S&P 500)\",\n",
      "          \"sentiment\": \"利好\",\n",
      "          \"reasoning\": \"美日之间达成重要的双边贸易协议，特别是在科技和汽车领域，将降低关税，促进贸易和企业利润，利好美国股市。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Euro/USD Exchange Rate\",\n",
      "          \"sentiment\": \"中性\",\n",
      "          \"reasoning\": \"该协议主要影响美日之间的贸易关系和太平洋地区的贸易流，对欧元与美元的汇率影响有限且间接。\"\n",
      "        },\n",
      "        {\n",
      "          \"market\": \"Gold\",\n",
      "          \"sentiment\": \"利空\",\n",
      "          \"reasoning\": \"重要的贸易协议降低了不确定性，可能提振经济增长预期，从而降低对避险资产黄金的需求。\"\n",
      "        }\n",
      "      ],\n",
      "      \"black_swan_alert\": {\n",
      "        \"is_alert\": false,\n",
      "        \"category\": null,\n",
      "        \"severity\": null,\n",
      "        \"alert_reason\": null\n",
      "      }\n",
      "    }\n",
      "  ]\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "# Demo driver: write a dummy tweet file, then run the Gemini analysis over it.\n",
    "# Read the API key from the environment instead of hardcoding a secret in the notebook;\n",
    "# the placeholder fallback keeps the original behavior when the variable is unset.\n",
    "my_google_api_key = os.environ.get(\"GOOGLE_API_KEY\", \"YOUR_GOOGLE_API_KEY\")\n",
    "\n",
    "DUMMY_TWEETS_FILE = '../data/tweets_2025-08-01_00-00-00.json'\n",
    "example_tweets = [\n",
    "    {\n",
    "        \"id\": \"182510101010101010\",\n",
    "        \"author_username\": \"USTreasury\",\n",
    "        \"text\": \"BREAKING: The latest US jobs report far exceeds expectations, with unemployment dropping to a historic low. This is a clear sign of a robust and resilient American economy. Confidence is high.\",\n",
    "        \"created_at\": \"2025-08-18T13:00:00Z\"\n",
    "    },\n",
    "    {\n",
    "        \"id\": \"182510202020202020\",\n",
    "        \"author_username\": \"ECB_President\",\n",
    "        \"text\": \"Inflationary pressures in the Eurozone remain a primary concern. We must stay the course with tight monetary policy, which may unfortunately dampen short-term growth prospects for the bloc.\",\n",
    "        \"created_at\": \"2025-08-18T13:05:00Z\"\n",
    "    },\n",
    "    {\n",
    "        \"id\": \"182510303030303030\",\n",
    "        \"author_username\": \"UN_Spokesperson\",\n",
    "        \"text\": \"International diplomatic talks regarding the East Sea dispute have broken down without resolution. Tensions in the region are escalating, and we urge all parties to exercise maximum restraint.\",\n",
    "        \"created_at\": \"2025-08-18T13:10:00Z\"\n",
    "    },\n",
    "    {\n",
    "        \"id\": \"182510404040404040\",\n",
    "        \"author_username\": \"President_FR\",\n",
    "        \"text\": \"It was a pleasure to host the annual G7 summit on climate change today. International cooperation is key to securing a sustainable future for our planet.\",\n",
    "        \"created_at\": \"2025-08-18T13:15:00Z\"\n",
    "    },\n",
    "    {\n",
    "        \"id\": \"182510505050505050\",\n",
    "        \"author_username\": \"FinanceMinister_JP\",\n",
    "        \"text\": \"Japan has successfully negotiated a major bilateral trade deal with the United States, focused on reducing tariffs on technology and automotive components. This will reshape trade flows in the Pacific.\",\n",
    "        \"created_at\": \"2025-08-18T13:20:00Z\"\n",
    "    }\n",
    "]\n",
    "# Ensure the target directory exists; a fresh checkout may not have ../data yet.\n",
    "os.makedirs(os.path.dirname(DUMMY_TWEETS_FILE), exist_ok=True)\n",
    "with open(DUMMY_TWEETS_FILE, 'w', encoding='utf-8') as f:\n",
    "    json.dump(example_tweets, f, indent=4)\n",
    "\n",
    "markets = [\"US Equities (S&P 500)\", \"Euro/USD Exchange Rate\", \"Gold\"]\n",
    "\n",
    "analysis_result_str = analyze_tweets_with_gemini(\n",
    "    tweets_filepath=DUMMY_TWEETS_FILE,\n",
    "    markets_list=markets,\n",
    "    google_api_key=my_google_api_key\n",
    ")\n",
    "\n",
    "if analysis_result_str:\n",
    "    print(\"\\n\\n--- 最终分析结果 (JSON) ---\")\n",
    "    parsed_json = json.loads(analysis_result_str)\n",
    "    pretty_json = json.dumps(parsed_json, indent=2, ensure_ascii=False)\n",
    "    print(pretty_json)\n",
    "else:\n",
    "    print(\"\\n\\n--- 分析流程失败 ---\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c9c59f9",
   "metadata": {},
   "source": [
    "### 2.3：将分析结果持久化存储"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "7613c5c8",
   "metadata": {},
   "outputs": [],
   "source": [
    "def save_analysis_to_file(\n",
    "    analysis_json_str: Optional[str],\n",
    "    source_tweets_filepath: str,\n",
    "    output_dir: str = '../analysis_reports'\n",
    ") -> str:\n",
    "    \"\"\"\n",
    "    Persist the analysis result (a JSON string from the Gemini model) to a local file.\n",
    "\n",
    "    Args:\n",
    "        analysis_json_str (Optional[str]): JSON string holding the analysis result.\n",
    "        source_tweets_filepath (str): Path of the original tweets file the analysis\n",
    "                                      was generated from; the report name is derived from it.\n",
    "        output_dir (str, optional): Directory to write the report into.\n",
    "                                    Defaults to '../analysis_reports'.\n",
    "\n",
    "    Returns:\n",
    "        str: Path of the saved report file, or an empty string when the input is\n",
    "             empty or saving fails.\n",
    "    \"\"\"\n",
    "    if not analysis_json_str:\n",
    "        print(\"--- 分析结果为空，无需保存文件。 ---\")\n",
    "        return \"\"\n",
    "\n",
    "    try:\n",
    "        # Step 1: make sure the output directory exists.\n",
    "        os.makedirs(output_dir, exist_ok=True)\n",
    "\n",
    "        # Step 2: derive the report name from the source file name to keep them linked.\n",
    "        base_name = os.path.basename(source_tweets_filepath)\n",
    "        report_filename = base_name.replace('tweets_', 'analysis_report_')\n",
    "        if report_filename == base_name:\n",
    "            # Source name had no 'tweets_' prefix; prepend one so the report\n",
    "            # file name never shadows the source file name.\n",
    "            report_filename = 'analysis_report_' + base_name\n",
    "\n",
    "        # Step 3: build the full output path.\n",
    "        file_path = os.path.join(output_dir, report_filename)\n",
    "\n",
    "        # Step 4: parse then re-dump, so the stored file is validated, pretty-printed JSON.\n",
    "        print(\"\\n--- 正在将分析结果保存到文件... ---\")\n",
    "        parsed_data = json.loads(analysis_json_str)\n",
    "        with open(file_path, 'w', encoding='utf-8') as f:\n",
    "            json.dump(parsed_data, f, ensure_ascii=False, indent=2)\n",
    "        \n",
    "        print(f\"分析报告保存成功！文件路径: {file_path}\")\n",
    "        return file_path\n",
    "\n",
    "    except json.JSONDecodeError:\n",
    "        print(f\"错误: 无法解析分析结果字符串为JSON。原始字符串: '{analysis_json_str[:100]}...'\")\n",
    "        return \"\"\n",
    "    except Exception as e:\n",
    "        print(f\"错误：保存分析文件失败。原因: {e}\")\n",
    "        return \"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "02e8ef79",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "--- 正在将分析结果保存到文件... ---\n",
      "分析报告保存成功！文件路径: ../analysis_reports\\analysis_report_2025-08-01_00-00-00.json\n"
     ]
    }
   ],
   "source": [
    "# Persist the analysis from the previous cell; the report file name is\n",
    "# derived from the dummy tweets file so the two stay linked.\n",
    "saved_report_path = save_analysis_to_file(\n",
    "    analysis_json_str=analysis_result_str,\n",
    "    output_dir='../analysis_reports',\n",
    "    source_tweets_filepath=DUMMY_TWEETS_FILE,\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "da-env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
