# 声明：本代码仅供学习和研究目的使用。使用者应遵守以下原则：  
# 1. 不得用于任何商业用途。  
# 2. 使用时应遵守目标平台的使用条款和robots.txt规则。  
# 3. 不得进行大规模爬取或对平台造成运营干扰。  
# 4. 应合理控制请求频率，避免给目标平台带来不必要的负担。   
# 5. 不得用于任何非法或不当的用途。
#   
# 详细许可条款请参阅项目根目录下的LICENSE文件。  
# 使用本代码即表示您同意遵守上述原则和LICENSE中的所有条款。  


import asyncio
import os
import random
from asyncio import Task
from typing import Any, Dict, List, Optional, Tuple

from playwright.async_api import (BrowserContext, BrowserType, Page,
                                  async_playwright)

import config
import db
from base.base_crawler import AbstractCrawler
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import douyin as douyin_store
from tools import utils
from var import crawler_type_var, source_keyword_var
from .client import DOUYINClient
from .exception import *
from .field import PublishTimeType
from .login import DouYinLogin


class DouYinCrawler(AbstractCrawler):
    """Crawler for douyin.com built on Playwright plus the site's web API client."""

    context_page: Page
    douyin_client: DOUYINClient
    browser_context: BrowserContext

    def __init__(self, **kwargs) -> None:
        """Record crawl limits and targets supplied by the caller.

        Recognized kwargs: ``max_notes_count``, ``max_comments_count``,
        ``max_fans_count``, ``max_follower_count`` (each defaults to 8),
        ``keywords`` (comma-separated string) and ``creator_id_list``.
        """
        super().__init__(**kwargs)
        self.index_url = "https://www.douyin.com"

        # Per-run limits, falling back to a small default of 8 each.
        self.max_notes_count = kwargs.get('max_notes_count', 8)
        self.max_comments_count = kwargs.get('max_comments_count', 8)
        self.max_fans_count = kwargs.get('max_fans_count', 8)
        self.max_follower_count = kwargs.get('max_follower_count', 8)

        # Crawl targets; either may be absent depending on CRAWLER_TYPE.
        self.keywords = kwargs.get('keywords', None)
        self.creator_id_list = kwargs.get('creator_id_list', None)

        utils.logger.info(f"[DouYinCrawler.__init__] 初始化爬虫实例:")
        for limit_name in ("max_notes_count", "max_comments_count",
                           "max_fans_count", "max_follower_count"):
            utils.logger.info(f"[DouYinCrawler.__init__] - {limit_name}: {getattr(self, limit_name)}")

    async def start(self) -> None:
        """Start the crawler: init the DB, launch the browser, ensure login,
        then dispatch to the handler selected by ``config.CRAWLER_TYPE``.

        Raises:
            Exception: any fatal error is logged and re-raised.
        """
        try:
            utils.logger.info("[DouYinCrawler.start] Starting crawler...")

            # Make sure the database layer is ready before anything else.
            await db.init_db()

            playwright_proxy_format, httpx_proxy_format = None, None

            if config.ENABLE_IP_PROXY:
                utils.logger.info("[DouYinCrawler.start] Setting up proxy...")
                ip_proxy_pool = await create_ip_pool(config.IP_PROXY_POOL_COUNT, enable_validate_ip=True)
                ip_proxy_info: IpInfoModel = await ip_proxy_pool.get_proxy()
                playwright_proxy_format, httpx_proxy_format = self.format_proxy_info(ip_proxy_info)

            async with async_playwright() as playwright:
                try:
                    utils.logger.info("[DouYinCrawler.start] Creating playwright chromium instance...")
                    chromium = playwright.chromium

                    # Force headed mode globally so the user can scan the login QR code.
                    config.HEADLESS = False

                    utils.logger.info("[DouYinCrawler.start] Launching browser...")
                    self.browser_context = await self.launch_browser(
                        chromium,
                        playwright_proxy_format,
                        user_agent=None,
                        headless=False
                    )

                    utils.logger.info("[DouYinCrawler.start] Adding stealth script...")
                    await self.browser_context.add_init_script(path="libs/stealth.min.js")

                    utils.logger.info("[DouYinCrawler.start] Creating new page...")
                    self.context_page = await self.browser_context.new_page()

                    utils.logger.info(f"[DouYinCrawler.start] Navigating to {self.index_url}...")
                    await self.context_page.goto(self.index_url)

                    utils.logger.info("[DouYinCrawler.start] Creating douyin client...")
                    self.douyin_client = await self.create_douyin_client(httpx_proxy_format)

                    utils.logger.info("[DouYinCrawler.start] Checking login status...")
                    max_login_retries = 3
                    for login_attempt in range(max_login_retries):
                        try:
                            if not await self.douyin_client.pong(self.context_page):
                                utils.logger.info(f"[DouYinCrawler.start] Login required... (Attempt {login_attempt + 1}/{max_login_retries})")

                                login_obj = DouYinLogin(
                                    login_type="qrcode",  # QR-code login only
                                    login_phone="",
                                    browser_context=self.browser_context,
                                    context_page=self.context_page,
                                    cookie_str=""
                                )

                                await login_obj.begin()
                                await self.douyin_client.update_cookies(browser_context=self.browser_context)

                                # Re-check that the login actually took effect.
                                if await self.douyin_client.pong(self.context_page):
                                    utils.logger.info("[DouYinCrawler.start] Login successful!")
                                    break
                            else:
                                utils.logger.info("[DouYinCrawler.start] Already logged in")
                                break

                        except Exception as e:
                            utils.logger.error(f"[DouYinCrawler.start] Login attempt {login_attempt + 1} failed: {str(e)}")
                            if login_attempt == max_login_retries - 1:
                                utils.logger.error("[DouYinCrawler.start] 登录失败，请扫描二维码登录")
                                await asyncio.sleep(120)  # give the user 2 minutes to scan the QR code
                            await asyncio.sleep(2)  # brief back-off before retrying

                    # NOTE(review): if every retry fails we still fall through and
                    # attempt the crawl — presumably so a manual scan during the
                    # 120s wait can rescue the session; confirm this is intended.
                    crawler_type_var.set(config.CRAWLER_TYPE)
                    if config.CRAWLER_TYPE == "search":
                        await self.search()
                    elif config.CRAWLER_TYPE == "detail":
                        await self.get_specified_notes()
                    elif config.CRAWLER_TYPE == "creator":
                        await self.get_creators_and_notes()
                    elif config.CRAWLER_TYPE == "fans":
                        await self.get_creator_fans()
                    elif config.CRAWLER_TYPE == "followers":
                        await self.get_creator_followers()

                    utils.logger.info("[DouYinCrawler.start] Douyin Crawler finished successfully")
                except Exception as e:
                    utils.logger.error(f"[DouYinCrawler.start] Error during browser operations: {str(e)}")
                    raise
                finally:
                    # Fix: ``browser_context`` is only a class-level annotation until
                    # launch_browser() succeeds, so reading ``self.browser_context``
                    # directly raised AttributeError whenever the launch itself failed.
                    if getattr(self, "browser_context", None):
                        await self.browser_context.close()
                        utils.logger.info("[DouYinCrawler.start] Browser context closed")
        except Exception as e:
            utils.logger.error(f"[DouYinCrawler.start] Fatal error: {str(e)}")
            raise

    async def search(self) -> None:
        """Search the configured keywords and persist, per keyword:
        1. posts matching the keyword
        2. each post's comments (up to ``max_comments_count``)
        3. each commenter's basic profile
        """
        utils.logger.info("[DouYinCrawler.search] 开始搜索抖音关键词")
        if not self.keywords:
            utils.logger.error("[DouYinCrawler.search] 未提供关键词")
            return

        for keyword in self.keywords.split(","):
            source_keyword_var.set(keyword)
            utils.logger.info(f"[DouYinCrawler.search] 当前搜索关键词: {keyword}")

            try:
                utils.logger.info(f"[DouYinCrawler.search] 搜索抖音关键词: {keyword}")
                max_count = self.max_notes_count
                utils.logger.info(f"[DouYinCrawler.search] 最大笔记数量: {max_count}")

                # Skip this keyword entirely when the client session is dead.
                if not await self.douyin_client.pong(self.context_page):
                    utils.logger.error("[DouYinCrawler.search] 客户端连接检查失败")
                    continue

                processed_count = 0
                offset = 0

                while processed_count < max_count:
                    # One page of search results.
                    search_result = await self.douyin_client.search_info_by_keyword(
                        keyword=keyword,
                        offset=offset,
                        publish_time=PublishTimeType.UNLIMITED
                    )

                    if not search_result or "data" not in search_result:
                        utils.logger.error(f"[DouYinCrawler.search] 搜索结果为空或无效: {search_result}")
                        break

                    items = search_result.get("data", [])
                    if not items:
                        utils.logger.info("[DouYinCrawler.search] 没有更多数据")
                        break

                    for item in items:
                        if processed_count >= max_count:
                            break

                        try:
                            # A result is either a plain post or a "mix"; fall back to
                            # the first mix item. Fix: an existing-but-empty
                            # "mix_items" list bypassed the [{}] default and made the
                            # hard-coded [0] access raise IndexError.
                            mix_items = item.get("aweme_mix_info", {}).get("mix_items") or [{}]
                            aweme_info = item.get("aweme_info") or mix_items[0]
                            if not aweme_info:
                                continue

                            # Persist the post itself.
                            await douyin_store.update_douyin_aweme(aweme_info)
                            utils.logger.info(f"[DouYinCrawler.search] 已保存帖子 {processed_count + 1}/{max_count}: {aweme_info.get('aweme_id', 'No ID')}")

                            # Persist the first page of comments and each commenter.
                            if "aweme_id" in aweme_info:
                                comments_response = await self.douyin_client.get_aweme_comments(
                                    aweme_id=aweme_info["aweme_id"],
                                    cursor=0
                                )

                                if comments_response and "comments" in comments_response:
                                    for comment in comments_response["comments"][:self.max_comments_count]:
                                        await douyin_store.update_dy_aweme_comment(aweme_info["aweme_id"], comment)

                                        if "user" in comment:
                                            await douyin_store.update_douyin_user(comment["user"])

                                    utils.logger.info(f"[DouYinCrawler.search] 已保存评论数据，帖子ID: {aweme_info['aweme_id']}")

                            processed_count += 1

                        except Exception as e:
                            utils.logger.error(f"[DouYinCrawler.search] 处理搜索结果出错: {str(e)}")
                            continue

                    # Page forward by however many items this page returned.
                    offset += len(items)

                    if processed_count < max_count:
                        await asyncio.sleep(1)  # rate-limit between pages

            except Exception as e:
                utils.logger.error(f"[DouYinCrawler.search] 搜索关键词出错, keyword: {keyword}, err: {str(e)}")
                continue

    async def get_creator_fans(self) -> None:
        """Fetch fan data for every creator in ``self.creator_id_list``.

        NOTE(review): as the file stands, this definition is dead code — it is
        replaced at class-creation time by the later ``get_creator_fans(creator_id,
        max_count)`` defined further down in this class, so the no-argument
        dispatch from ``start()`` actually hits that version. The two
        definitions should be merged or renamed.
        """
        if not self.creator_id_list:
            utils.logger.error("[DouYin] 未提供创作者ID列表")
            return
            
        utils.logger.info(f"[DouYin] 开始获取 {len(self.creator_id_list)} 个创作者的粉丝...")
        success_count = 0
        
        for creator_id in self.creator_id_list:
            try:
                # Intended to delegate to the per-creator variant with a cap of
                # max_fans_count (default 100 when unset/zero).
                await self.get_creator_fans(creator_id, self.max_fans_count or 100)
                success_count += 1
            except Exception as e:
                utils.logger.error(f"[DouYin] 获取创作者 {creator_id} 的粉丝失败: {str(e)}")
                continue
                
        utils.logger.info(f"[DouYin] 粉丝数据获取完成，成功处理 {success_count}/{len(self.creator_id_list)} 个创作者")

    async def get_creator_followers(self) -> None:
        """Fetch follow data for every creator in ``self.creator_id_list``.

        NOTE(review): as the file stands, this definition is dead code — it is
        replaced at class-creation time by the later
        ``get_creator_followers(creator_id, max_count)`` defined further down in
        this class, so the no-argument dispatch from ``start()`` actually hits
        that version. The two definitions should be merged or renamed.
        """
        if not self.creator_id_list:
            utils.logger.error("[DouYin] 未提供创作者ID列表")
            return
            
        utils.logger.info(f"[DouYin] 开始获取 {len(self.creator_id_list)} 个创作者的关注...")
        success_count = 0
        
        for creator_id in self.creator_id_list:
            try:
                # Intended to delegate to the per-creator variant with a cap of
                # max_follower_count (default 100 when unset/zero).
                await self.get_creator_followers(creator_id, self.max_follower_count or 100)
                success_count += 1
            except Exception as e:
                utils.logger.error(f"[DouYin] 获取创作者 {creator_id} 的关注失败: {str(e)}")
                continue
                
        utils.logger.info(f"[DouYin] 关注数据获取完成，成功处理 {success_count}/{len(self.creator_id_list)} 个创作者")

    async def get_specified_notes(self, note_list: Optional[List[Dict]] = None) -> None:
        """Persist the given notes, then the fan/follow relations of their authors.

        Args:
            note_list: notes to process; when None, notes are pulled for every
                id in ``self.creator_id_list``.
        """
        utils.logger.info(f"[DouYinCrawler.get_specified_notes] Starting to process notes, list size: {len(note_list) if note_list else 0}")
        
        # Make sure the DB layer is usable even if start() was bypassed.
        try:
            await db.init_db()
        except Exception as e:
            utils.logger.error(f"[DouYinCrawler.get_specified_notes] Failed to initialize database: {str(e)}")
            return
            
        creator_ids = set()  # (sec_uid, uid) pairs of creators still to process
        
        if note_list is None and self.creator_id_list:
            note_list = []
            for creator_id in self.creator_id_list:
                try:
                    # Fetch and persist the creator's profile.
                    creator_info = await self.douyin_client.get_user_info(creator_id)
                    if creator_info and "user" in creator_info:
                        await douyin_store.save_creator(creator_id, creator_info)
                        utils.logger.info(f"[DouYinCrawler.get_specified_notes] Saved creator info for {creator_id}")
                        
                        # Remember the pair for relation crawling below.
                        user_id = creator_info["user"].get("uid")
                        if user_id:
                            creator_ids.add((creator_id, user_id))
                    
                    # Collect the creator's notes (comments are skipped here).
                    notes = await self.douyin_client.get_creator_notes(creator_id, need_comments=False)
                    if notes:
                        note_list.extend(notes)
                        utils.logger.info(f"[DouYinCrawler.get_specified_notes] Got {len(notes)} notes for creator {creator_id}")
                        
                except Exception as e:
                    utils.logger.error(f"[DouYinCrawler.get_specified_notes] Error getting notes for creator {creator_id}: {str(e)}")
                    continue

        if not note_list:
            utils.logger.warning("[DouYinCrawler.get_specified_notes] No notes to process")
            return

        # Store each note and collect its author for relation crawling.
        processed_count = 0
        for note in note_list:
            try:
                await douyin_store.update_douyin_aweme(note)
                processed_count += 1
                utils.logger.info(f"[DouYinCrawler.get_specified_notes] Successfully processed note {processed_count}/{len(note_list)}")
                
                author = note.get("author", {})
                if author:
                    sec_uid = author.get("sec_uid")
                    uid = author.get("uid")
                    if sec_uid and uid:
                        creator_ids.add((sec_uid, uid))
                    
            except Exception as e:
                utils.logger.error(f"[DouYinCrawler.get_specified_notes] Error processing note: {str(e)}")
                continue
                
        utils.logger.info(f"[DouYinCrawler.get_specified_notes] Completed processing {processed_count} notes")
        
        # For each collected creator, save the profile again and store fan and
        # follow relations.  NOTE(review): creators handled in the first loop get
        # their profile fetched and saved a second time here — confirm whether
        # that duplication is intended.
        for creator_id, user_id in creator_ids:
            try:
                utils.logger.info(f"[DouYinCrawler.get_specified_notes] Processing creator {creator_id}")
                
                creator_info = await self.douyin_client.get_user_info(creator_id)
                if creator_info and "user" in creator_info:
                    await douyin_store.save_creator(creator_id, creator_info)
                    utils.logger.info(f"[DouYinCrawler.get_specified_notes] Saved info for creator {creator_id}")
                
                # Fans of the creator -> relation rows with r_type "0".
                fans = await self.douyin_client.get_creator_fans(
                    creator_id,
                    user_id,
                    max_count=self.max_fans_count
                )
                utils.logger.info(f"[DouYinCrawler.get_specified_notes] Got {len(fans)} fans for creator {creator_id}")
                
                for fan in fans:
                    relation_data = {
                        "user_id": creator_id,
                        "r_user_id": fan.get("sec_uid", ""),
                        "r_type": "0"  # "0" = fan relation
                    }
                    await douyin_store.save_relation_user(relation_data)
                
                # Accounts the creator follows -> relation rows with r_type "1".
                followers = await self.douyin_client.get_creator_followers(
                    creator_id,
                    user_id,
                    max_count=self.max_follower_count
                )
                utils.logger.info(f"[DouYinCrawler.get_specified_notes] Got {len(followers)} followers for creator {creator_id}")
                
                for follower in followers:
                    relation_data = {
                        "user_id": creator_id,
                        "r_user_id": follower.get("sec_uid", ""),
                        "r_type": "1"  # "1" = follow relation
                    }
                    await douyin_store.save_relation_user(relation_data)
                    
            except Exception as e:
                utils.logger.error(f"[DouYinCrawler.get_specified_notes] Error processing creator {creator_id}: {str(e)}")
                continue

    async def get_note_comments(self, aweme_id: str, max_comments: int) -> None:
        """Fetch one page of comments for ``aweme_id`` and persist each comment
        plus its author's profile, stopping after ``max_comments`` comments.

        All errors are logged (with traceback) and swallowed.
        """
        try:
            utils.logger.info(f"[DouYinCrawler.get_note_comments] Getting comments for aweme_id: {aweme_id}, max_comments: {max_comments}")
            response = await self.douyin_client.get_aweme_comments(
                aweme_id=aweme_id,
                cursor=0
            )

            if not response or "comments" not in response:
                return

            saved = 0
            for comment in response["comments"]:
                if saved >= max_comments:
                    break
                # Persist the comment itself, then its author if present.
                await douyin_store.update_dy_aweme_comment(aweme_id, comment)
                if "user" in comment:
                    await douyin_store.update_douyin_user(comment["user"])
                saved += 1
            utils.logger.info(f"[DouYinCrawler.get_note_comments] Successfully processed {saved} comments")
        except Exception as e:
            utils.logger.error(f"[DouYinCrawler.get_note_comments] Get note comments error: {str(e)}")
            import traceback
            utils.logger.error(f"[DouYinCrawler.get_note_comments] Error traceback: {traceback.format_exc()}")

    async def launch_browser(self, chromium: BrowserType, playwright_proxy: Optional[Dict], user_agent: Optional[str],
                             headless: bool = True) -> BrowserContext:
        """Create a browser context; persistent on disk when SAVE_LOGIN_STATE is set."""
        utils.logger.info("[DouYinCrawler.launch_browser] Begin create browser context ...")
        viewport = {"width": 1920, "height": 1080}

        if not config.SAVE_LOGIN_STATE:
            # Throwaway context: nothing survives the run.
            browser = await chromium.launch(headless=headless, proxy=playwright_proxy)  # type: ignore
            return await browser.new_context(
                viewport=viewport,
                user_agent=user_agent
            )

        # Persistent context keeps cookies / local storage between runs so the
        # login survives restarts.
        user_data_dir = os.path.join(os.getcwd(), "browser_data",
                                     config.USER_DATA_DIR % config.PLATFORM)  # type: ignore
        return await chromium.launch_persistent_context(
            user_data_dir=user_data_dir,
            accept_downloads=True,
            headless=headless,
            proxy=playwright_proxy,  # type: ignore
            viewport=viewport,
            user_agent=user_agent
        )

    async def close(self):
        """Close the browser context if one was ever created.

        Fix: ``browser_context`` is only a class-level annotation until
        ``start()`` assigns it, so calling ``close()`` before a successful
        launch raised AttributeError; ``getattr`` makes that a no-op instead.
        """
        browser_context = getattr(self, "browser_context", None)
        if browser_context:
            await browser_context.close()
            utils.logger.info("[DouYinCrawler.close] Browser context closed ...")

    @staticmethod
    def format_proxy_info(ip_proxy_info: IpInfoModel) -> Tuple[Optional[Dict], Optional[Dict]]:
        """Build the playwright-style and httpx-style proxy dicts from one proxy record."""
        # Playwright takes server + separate credentials.
        server = f"{ip_proxy_info.protocol}{ip_proxy_info.ip}:{ip_proxy_info.port}"
        playwright_proxy = {
            "server": server,
            "username": ip_proxy_info.user,
            "password": ip_proxy_info.password,
        }
        # httpx takes a scheme-keyed URL with credentials embedded.
        credential_url = (
            f"http://{ip_proxy_info.user}:{ip_proxy_info.password}"
            f"@{ip_proxy_info.ip}:{ip_proxy_info.port}"
        )
        httpx_proxy = {f"{ip_proxy_info.protocol}": credential_url}
        return playwright_proxy, httpx_proxy

    async def create_douyin_client(self, proxy_format: Optional[str] = None) -> DOUYINClient:
        """
        Build a DOUYINClient wired to the current browser session.

        :param proxy_format: optional proxy URL applied to both http and https
        :return: configured DOUYINClient instance
        """
        utils.logger.info("[DouYinCrawler.create_douyin_client] Creating douyin client...")

        # Reuse the browser's cookies so API calls share the logged-in session.
        cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())

        headers = {
            "User-Agent": config.UA,
            "Cookie": cookie_str,
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Origin": "https://www.douyin.com",
            "Referer": "https://www.douyin.com/",
            "Content-Type": "application/json",
        }

        proxies = {"http": proxy_format, "https": proxy_format} if proxy_format else None
        client = DOUYINClient(
            timeout=30,
            proxies=proxies,
            headers=headers,
            playwright_page=self.context_page,
            cookie_dict=cookie_dict,
            max_notes_count=self.max_notes_count,
            max_comments_count=self.max_comments_count,
            max_fans_count=self.max_fans_count,
            max_follower_count=self.max_follower_count
        )

        utils.logger.info("[DouYinCrawler.create_douyin_client] Created client with limits:")
        for limit_name in ("max_notes_count", "max_comments_count",
                           "max_fans_count", "max_follower_count"):
            utils.logger.info(f"[DouYinCrawler.create_douyin_client] - {limit_name}: {getattr(client, limit_name)}")

        return client

    async def get_creator_fans(self, creator_id: Optional[str] = None, max_count: Optional[int] = None) -> None:
        """Fetch a creator's fan list and persist each fan plus a relation row.

        Args:
            creator_id: creator sec-id. When None, every id in
                ``self.creator_id_list`` is processed in turn. Fix: this
                definition shadows the earlier no-argument batch method of the
                same name, so ``start()``'s no-arg call for CRAWLER_TYPE
                "fans" raised TypeError; the optional parameter restores that
                batch entry point while staying backward compatible.
            max_count: cap on fans fetched; defaults to ``self.max_fans_count``.
        """
        if creator_id is None:
            # Batch mode over the configured creator list.
            if not self.creator_id_list:
                utils.logger.error("[DouYin] 未提供创作者ID列表")
                return
            utils.logger.info(f"[DouYin] 开始获取 {len(self.creator_id_list)} 个创作者的粉丝...")
            success_count = 0
            for single_id in self.creator_id_list:
                try:
                    await self.get_creator_fans(single_id, max_count or self.max_fans_count or 100)
                    success_count += 1
                except Exception as e:
                    utils.logger.error(f"[DouYin] 获取创作者 {single_id} 的粉丝失败: {str(e)}")
                    continue
            utils.logger.info(f"[DouYin] 粉丝数据获取完成，成功处理 {success_count}/{len(self.creator_id_list)} 个创作者")
            return

        try:
            utils.logger.info(f"[DouYinCrawler.get_creator_fans] 开始获取创作者 {creator_id} 的粉丝列表")

            # Resolve the numeric uid the fan-list API needs.
            user_info = await self.douyin_client.get_creator_info(creator_id)
            if not user_info:
                utils.logger.error(f"[DouYinCrawler.get_creator_fans] 无法获取创作者信息: {creator_id}")
                return

            user_id = user_info.get("uid", "")
            if not user_id:
                utils.logger.error(f"[DouYinCrawler.get_creator_fans] 无法获取用户ID: {creator_id}")
                return

            max_fans = max_count or self.max_fans_count
            utils.logger.info(f"[DouYinCrawler.get_creator_fans] 最大粉丝获取数量: {max_fans}")

            fans_list = await self.douyin_client.get_creator_fans(
                sec_user_id=creator_id,
                user_id=user_id,
                max_count=max_fans
            )

            if not fans_list:
                utils.logger.warning(f"[DouYinCrawler.get_creator_fans] 未获取到粉丝数据: {creator_id}")
                return

            utils.logger.info(f"[DouYinCrawler.get_creator_fans] 获取到 {len(fans_list)} 个粉丝")

            # Persist each fan's profile and the creator<-fan relation row.
            success_count = 0
            for fan in fans_list:
                try:
                    await douyin_store.update_douyin_user(fan)

                    relation_data = {
                        "user_id": creator_id,
                        "r_user_id": fan.get("sec_uid", ""),
                        "r_type": 0,  # 0 = fan relation
                        "platform": "dy"
                    }
                    await douyin_store.save_relation_user(relation_data)
                    success_count += 1

                except Exception as store_err:
                    utils.logger.error(f"[DouYinCrawler.get_creator_fans] 存储粉丝数据失败: {str(store_err)}")
                    continue

            utils.logger.info(f"[DouYinCrawler.get_creator_fans] 成功存储 {success_count}/{len(fans_list)} 个粉丝数据")

        except Exception as e:
            utils.logger.error(f"[DouYinCrawler.get_creator_fans] 获取粉丝列表失败: {str(e)}")
            import traceback
            utils.logger.error(f"[DouYinCrawler.get_creator_fans] 错误堆栈: {traceback.format_exc()}")

    async def get_creator_followers(self, creator_id: Optional[str] = None, max_count: Optional[int] = None) -> None:
        """Fetch a creator's follow list and persist each account plus a relation row.

        Args:
            creator_id: creator sec-id. When None, every id in
                ``self.creator_id_list`` is processed in turn. Fix: this
                definition shadows the earlier no-argument batch method of the
                same name, so ``start()``'s no-arg call for CRAWLER_TYPE
                "followers" raised TypeError; the optional parameter restores
                that batch entry point while staying backward compatible.
            max_count: cap on accounts fetched; defaults to
                ``self.max_follower_count``.
        """
        if creator_id is None:
            # Batch mode over the configured creator list.
            if not self.creator_id_list:
                utils.logger.error("[DouYin] 未提供创作者ID列表")
                return
            utils.logger.info(f"[DouYin] 开始获取 {len(self.creator_id_list)} 个创作者的关注...")
            success_count = 0
            for single_id in self.creator_id_list:
                try:
                    await self.get_creator_followers(single_id, max_count or self.max_follower_count or 100)
                    success_count += 1
                except Exception as e:
                    utils.logger.error(f"[DouYin] 获取创作者 {single_id} 的关注失败: {str(e)}")
                    continue
            utils.logger.info(f"[DouYin] 关注数据获取完成，成功处理 {success_count}/{len(self.creator_id_list)} 个创作者")
            return

        try:
            utils.logger.info(f"[DouYinCrawler.get_creator_followers] 开始获取创作者 {creator_id} 的关注列表")

            # Resolve the numeric uid the follow-list API needs.
            user_info = await self.douyin_client.get_creator_info(creator_id)
            if not user_info:
                utils.logger.error(f"[DouYinCrawler.get_creator_followers] 无法获取创作者信息: {creator_id}")
                return

            user_id = user_info.get("uid", "")
            if not user_id:
                utils.logger.error(f"[DouYinCrawler.get_creator_followers] 无法获取用户ID: {creator_id}")
                return

            max_followers = max_count or self.max_follower_count
            utils.logger.info(f"[DouYinCrawler.get_creator_followers] 最大关注获取数量: {max_followers}")

            followers_list = await self.douyin_client.get_creator_followers(
                sec_user_id=creator_id,
                user_id=user_id,
                max_count=max_followers
            )

            if not followers_list:
                utils.logger.warning(f"[DouYinCrawler.get_creator_followers] 未获取到关注数据: {creator_id}")
                return

            utils.logger.info(f"[DouYinCrawler.get_creator_followers] 获取到 {len(followers_list)} 个关注")

            # Persist each followed account's profile and the relation row.
            success_count = 0
            for follower in followers_list:
                try:
                    await douyin_store.update_douyin_user(follower)

                    relation_data = {
                        "user_id": creator_id,
                        "r_user_id": follower.get("sec_uid", ""),
                        "r_type": 1,  # 1 = follow relation
                        "platform": "dy"
                    }
                    await douyin_store.save_relation_user(relation_data)
                    success_count += 1

                except Exception as store_err:
                    utils.logger.error(f"[DouYinCrawler.get_creator_followers] 存储关注数据失败: {str(store_err)}")
                    continue

            utils.logger.info(f"[DouYinCrawler.get_creator_followers] 成功存储 {success_count}/{len(followers_list)} 个关注数据")

        except Exception as e:
            utils.logger.error(f"[DouYinCrawler.get_creator_followers] 获取关注列表失败: {str(e)}")
            import traceback
            utils.logger.error(f"[DouYinCrawler.get_creator_followers] 错误堆栈: {traceback.format_exc()}")

    async def process_note(self, note: Dict) -> None:
        """Persist one note, then pull and persist its comments.

        Args:
            note: raw aweme dict as returned by the douyin API.

        Raises:
            Exception: re-raised after logging so the caller decides how to react.
        """
        utils.logger.info(f"[DouYinCrawler.process_note] Processing note: {note.get('aweme_id', 'No ID')}")

        try:
            await douyin_store.update_douyin_aweme(note)

            if not note.get("aweme_id"):
                utils.logger.warning("[DouYinCrawler.process_note] Note has no aweme_id")
            else:
                await self.get_note_comments(note["aweme_id"], self.max_comments_count)

        except Exception as e:
            utils.logger.error(f"[DouYinCrawler.process_note] Error processing note {note.get('aweme_id', 'No ID')}: {str(e)}")
            import traceback
            utils.logger.error(f"[DouYinCrawler.process_note] Error traceback: {traceback.format_exc()}")
            raise  # propagate so the caller can handle it

    async def get_creators_and_notes(self) -> None:
        """For every creator in ``self.creator_id_list`` persist:
        1. a random sample of up to ``max_notes_count`` of the creator's posts
        2. each post's comments (up to ``max_comments_count``)
        3. each commenter's basic profile

        NOTE(review): the comment-saving loop below duplicates the logic of
        ``get_note_comments``/``search`` — consider consolidating.
        """
        if not self.creator_id_list:
            utils.logger.error("[DouYinCrawler.get_creators_and_notes] 未提供创作者ID列表")
            return
            
        utils.logger.info(f"[DouYinCrawler.get_creators_and_notes] 开始获取 {len(self.creator_id_list)} 个创作者的帖子数据")
        success_count = 0
        
        for creator_id in self.creator_id_list:
            try:
                utils.logger.info(f"[DouYinCrawler.get_creators_and_notes] 处理创作者 {creator_id} 的帖子")
                
                # Pull the creator's full post list from the client.
                creator_notes = await self.douyin_client.get_all_user_aweme_posts(sec_user_id=creator_id)
                
                if not creator_notes:
                    utils.logger.warning(f"[DouYinCrawler.get_creators_and_notes] 未获取到创作者 {creator_id} 的帖子")
                    continue
                    
                # Randomly sample down to max_notes_count posts.
                if len(creator_notes) > self.max_notes_count:
                    selected_notes = random.sample(creator_notes, self.max_notes_count)
                else:
                    selected_notes = creator_notes
                    
                utils.logger.info(f"[DouYinCrawler.get_creators_and_notes] 开始处理 {len(selected_notes)} 个帖子")
                
                # Persist each selected post and its comments.
                processed_count = 0
                for note in selected_notes:
                    try:
                        # Store the post itself.
                        await douyin_store.update_douyin_aweme(note)
                        
                        # Store the first page of comments plus each commenter.
                        if "aweme_id" in note:
                            comments_response = await self.douyin_client.get_aweme_comments(
                                aweme_id=note["aweme_id"],
                                cursor=0
                            )
                            
                            if comments_response and "comments" in comments_response:
                                for comment in comments_response["comments"][:self.max_comments_count]:
                                    # Store the comment.
                                    await douyin_store.update_dy_aweme_comment(note["aweme_id"], comment)
                                    
                                    # Store the commenter's profile.
                                    if "user" in comment:
                                        await douyin_store.update_douyin_user(comment["user"])
                                        
                                utils.logger.info(f"[DouYinCrawler.get_creators_and_notes] 已保存评论数据，帖子ID: {note['aweme_id']}")
                                
                        processed_count += 1
                        utils.logger.info(f"[DouYinCrawler.get_creators_and_notes] 已处理 {processed_count}/{len(selected_notes)} 个帖子")
                        
                    except Exception as e:
                        utils.logger.error(f"[DouYinCrawler.get_creators_and_notes] 处理帖子出错: {str(e)}")
                        continue
                        
                if processed_count > 0:
                    success_count += 1
                    
                # Rate-limit between creators.
                await asyncio.sleep(1)
                    
            except Exception as e:
                utils.logger.error(f"[DouYinCrawler.get_creators_and_notes] 处理创作者 {creator_id} 时出错: {str(e)}")
                continue
                
        utils.logger.info(f"[DouYinCrawler.get_creators_and_notes] 数据获取完成，成功处理 {success_count}/{len(self.creator_id_list)} 个创作者的帖子")