import asyncio
import config
from db import init_db, close_db, init_mediacrawler_db
from typing import List, Optional, Dict, Any
from base.base_crawler import AbstractCrawler
from media_platform.bilibili import BilibiliCrawler
from media_platform.douyin import DouYinCrawler
from media_platform.kuaishou import KuaishouCrawler
from media_platform.tieba import TieBaCrawler
from media_platform.weibo import WeiboCrawler
from media_platform.xhs import XiaoHongShuCrawler
from media_platform.zhihu import ZhihuCrawler
from webui_db import DataManager
from tools import utils

class CrawlerManager:
    """Drives MediaCrawler platform crawlers on behalf of the web UI.

    Owns one active crawler instance at a time plus shared browser and
    database resources, and exposes high-level ``collect_*`` scenario
    methods that run one or more crawls with progress callbacks.
    """

    # Maps UI display names (Chinese) to internal platform codes.
    # NOTE(review): only three platforms are exposed here although CRAWLERS
    # supports seven — presumably intentional for the UI; confirm.
    PLATFORM_MAP: Dict[str, str] = {
        "抖音": "dy",
        "微博": "wb",
        "小红书": "xhs"
    }
    
    # Maps internal platform codes to their crawler implementation classes.
    CRAWLERS: Dict[str, Any] = {
        "xhs": XiaoHongShuCrawler,
        "dy": DouYinCrawler,
        "ks": KuaishouCrawler,
        "bili": BilibiliCrawler,
        "wb": WeiboCrawler,
        "tieba": TieBaCrawler,
        "zhihu": ZhihuCrawler
    }
    
    def __init__(self):
        """Set up manager state: no active crawler, no browser, DB not yet connected."""
        # Lazily-created resources; populated by initialize() and the crawl methods.
        self._is_initialized = False
        self._browser = None
        self._browser_context = None
        # The crawler currently running, if any.
        self.current_crawler: Optional[AbstractCrawler] = None
        # Read/write access to the web-UI database.
        self.data_manager = DataManager()
        
    async def initialize(self):
        """Open the MediaCrawler database connection; idempotent across calls."""
        if self._is_initialized:
            return
        await init_mediacrawler_db()
        self._is_initialized = True
    
    async def cleanup(self):
        """Release every held resource, in dependency order.

        Order matters: the crawler may still be using the browser, and both
        may be using the database, so teardown runs crawler -> browser -> DB.
        Each step logs its own failures so the later steps always run.
        """
        try:
            # The crawler instance goes first.
            await self._cleanup_crawler()

            # Then the shared browser, if one is still held.
            browser = self._browser
            if browser:
                try:
                    await browser.close()
                except Exception as e:
                    utils.logger.error(f"[CrawlerManager.cleanup] 关闭浏览器时出错: {str(e)}")
                finally:
                    self._browser = None

            # Database connections are closed last.
            if self._is_initialized:
                try:
                    await self.data_manager.close()
                    await close_db()
                except Exception as e:
                    utils.logger.error(f"[CrawlerManager.cleanup] 关闭数据库连接时出错: {str(e)}")
                finally:
                    self._is_initialized = False

        except Exception as e:
            utils.logger.error(f"[CrawlerManager.cleanup] 清理资源时出错: {str(e)}")
    
    async def _cleanup_crawler(self):
        """Close and discard the current crawler instance, if any.

        Deliberately does NOT touch the database connection: the DB lifecycle
        is owned by initialize()/cleanup(). BUG FIX: the previous version
        called close_db() in a finally clause on every invocation, which
        (a) tore down the connection immediately after
        _create_and_setup_crawler() had just opened it (initialize() and
        _cleanup_crawler() run back-to-back there) while _is_initialized
        stayed True, so initialize() never reopened it, and
        (b) caused a second close_db() in cleanup().
        """
        crawler = getattr(self, 'current_crawler', None)
        if not crawler:
            return
        try:
            await crawler.close()
        except Exception as e:
            # Non-fatal: the crawler may already be partially torn down.
            utils.logger.warning(f"[CrawlerManager._cleanup_crawler] 关闭爬虫实例时出现非致命错误: {str(e)}")
        finally:
            # Always drop the reference so a failed close can't be reused.
            self.current_crawler = None
    
    async def _create_and_setup_crawler(self, platform: str, task_type: str, **kwargs) -> None:
        """Create, configure and start a crawler for ``platform``.

        Args:
            platform: internal platform code ("dy", "wb", "xhs", ...).
            task_type: crawl mode written to config.CRAWLER_TYPE
                ("search", "creator", "fans", "followers", ...).
            **kwargs: keyword/limit options; historical aliases accepted (see below).

        Raises:
            ValueError: unsupported platform code.
        """
        def _first_of(*names):
            # Call sites pass each limit under several historical names
            # (e.g. CRAWLER_MAX_NOTES_COUNT, posts_limit); take the first set one.
            for name in names:
                value = kwargs.get(name)
                if value is not None:
                    return value
            return None

        try:
            # Ensure the DB is up, then retire any previous crawler instance.
            await self.initialize()
            await self._cleanup_crawler()

            crawler_class = self.CRAWLERS.get(platform)
            if not crawler_class:
                raise ValueError("不支持的平台类型")

            # BUG FIX: the previous mapping only read lowercase keys
            # ('max_notes_count', 'max_comments_count', ...) which no call site
            # ever passed, so every limit was silently dropped. Resolve each
            # option through all the aliases callers actually use.
            crawler_params = {
                'max_notes_count': _first_of('max_notes_count', 'CRAWLER_MAX_NOTES_COUNT',
                                             'posts_limit', 'base_limit'),
                'max_comments_count': _first_of('max_comments_count', 'MAX_COMMENTS_COUNT'),
                'max_fans_count': _first_of('max_fans_count', 'MAX_FANS_COUNT'),
                'max_follower_count': _first_of('max_follower_count', 'MAX_FOLLOWER_COUNT'),
                'keywords': kwargs.get('KEYWORDS'),
                'creator_id_list': kwargs.get('DY_CREATOR_ID_LIST')
            }

            # Only forward options that were actually provided.
            self.current_crawler = crawler_class(**{k: v for k, v in crawler_params.items() if v is not None})

            # Hand over a previously created browser/context, transferring ownership.
            if hasattr(self.current_crawler, 'browser') and self._browser:
                self.current_crawler.browser = self._browser
                self._browser = None
            if hasattr(self.current_crawler, 'browser_context') and self._browser_context:
                self.current_crawler.browser_context = self._browser_context
                self._browser_context = None

            # Publish the crawl mode via the shared config module.
            config.CRAWLER_TYPE = task_type

            await self.current_crawler.start()

        except Exception as e:
            utils.logger.error(f"启动爬虫时出错: {str(e)}")
            await self._cleanup_crawler()
            raise

    async def collect_all_data(self, platform: str, keywords: str,
                             posts_limit: int = 10,
                             comments_limit: int = 20,
                             fans_limit: int = 100,
                             follows_limit: int = 100,
                             status_callback: Optional[callable] = None) -> None:
        """One-shot collection of all related data for each keyword.

        Per keyword: crawl matching posts, then for every user found in the
        stored results crawl their profile, fans and follow lists. Failures
        on a single keyword or user are logged and skipped.

        Args:
            platform: 平台名称（抖音、微博、小红书）
            keywords: comma-separated search keywords.
            posts_limit: max posts per keyword.
            comments_limit: max comments per post.
            fans_limit: max fans per user; 0 skips fan collection.
            follows_limit: max follows per user; 0 skips follow collection.
            status_callback: optional (message, progress 0..1) hook.

        Raises:
            Exception: wraps any fatal error as "数据采集失败: ...".
        """
        try:
            config.SAVE_DATA_OPTION = "db"
            platform_code = self.PLATFORM_MAP.get(platform)
            if not platform_code:
                # BUG FIX: a None platform code used to flow into every crawl
                # call and each keyword silently failed; fail fast instead,
                # consistent with the other collect_* methods.
                raise ValueError(f"不支持的平台类型: {platform}")
            keyword_list = [k.strip() for k in keywords.split(",") if k.strip()]
            total_keywords = len(keyword_list)

            for keyword_idx, keyword in enumerate(keyword_list):
                current_progress = keyword_idx / total_keywords
                if status_callback:
                    status_callback(f"开始采集关键词 '{keyword}' 的数据...", current_progress)

                try:
                    # 1. Posts for this keyword.
                    if status_callback:
                        status_callback(f"正在采集关键词 '{keyword}' 的帖子数据...", current_progress)

                    await self._create_and_setup_crawler(
                        platform=platform_code,
                        task_type="search",
                        KEYWORDS=keyword,
                        CRAWLER_MAX_NOTES_COUNT=posts_limit,
                        MAX_COMMENTS_COUNT=comments_limit
                    )

                    # 2. User ids / sec_uids from the stored posts.
                    #    Guard: the query may return None as well as [].
                    user_data = await self.data_manager.get_unique_user_ids_and_sec_uids(platform_code) or []
                    total_users = len(user_data)

                    for user_idx, (user_id, sec_uid) in enumerate(user_data):
                        user_progress = current_progress + (user_idx / total_users) * (1 / total_keywords)

                        try:
                            # 3. User profile (keyed by sec_uid).
                            if status_callback:
                                status_callback(f"正在采集用户 {user_id} 的详细信息...", user_progress)

                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="creator",
                                DY_CREATOR_ID_LIST=[sec_uid]
                            )

                            # 4. User fans (keyed by sec_uid).
                            if fans_limit > 0:
                                if status_callback:
                                    status_callback(f"正在采集用户 {user_id} 的粉丝数据...", user_progress)

                                await self._create_and_setup_crawler(
                                    platform=platform_code,
                                    task_type="fans",
                                    DY_CREATOR_ID_LIST=[sec_uid],
                                    MAX_FANS_COUNT=fans_limit
                                )

                            # 5. User follow list (keyed by sec_uid).
                            if follows_limit > 0:
                                if status_callback:
                                    status_callback(f"正在采集用户 {user_id} 的关注数据...", user_progress)

                                await self._create_and_setup_crawler(
                                    platform=platform_code,
                                    task_type="followers",
                                    DY_CREATOR_ID_LIST=[sec_uid],
                                    MAX_FOLLOWER_COUNT=follows_limit
                                )

                        except Exception as e:
                            # Consistency fix: log via utils.logger, not print().
                            utils.logger.error(f"采集用户 {user_id} 数据时出错: {str(e)}")
                            continue

                except Exception as e:
                    utils.logger.error(f"采集关键词 '{keyword}' 时出错: {str(e)}")
                    continue

            if status_callback:
                status_callback("数据采集完成！", 1.0)

        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"数据采集失败: {str(e)}") from e
        finally:
            await self._cleanup_crawler()
    
    async def collect_keyword_based_data(self, platform: str, keywords: str,
                                       base_limit: int = 10,
                                       posts_limit: int = 20,
                                       comments_limit: int = 50,
                                       fans_limit: int = 100,
                                       follows_limit: int = 100,
                                       status_callback: Optional[callable] = None) -> None:
        """Scenario A: keyword-driven user data collection.

        Resolves the platform code, publishes the limit settings through the
        shared config module, checks DB connectivity, then runs one "search"
        crawl per keyword, reporting progress through status_callback.
        """
        try:
            # Accept either an internal code ("dy") or a UI display name ("抖音").
            if platform in self.PLATFORM_MAP.values():
                platform_code = platform
            else:
                platform_code = self.PLATFORM_MAP.get(platform)
                if not platform_code:
                    raise ValueError(f"不支持的平台类型: {platform}")

            for message in (
                f"[collect_keyword_based_data] 开始采集平台 {platform_code} 的数据",
                f"[collect_keyword_based_data] 关键词: {keywords}",
                "[collect_keyword_based_data] 配置信息:",
                f"[collect_keyword_based_data] - base_limit: {base_limit}",
                f"[collect_keyword_based_data] - posts_limit: {posts_limit}",
                f"[collect_keyword_based_data] - comments_limit: {comments_limit}",
                f"[collect_keyword_based_data] - fans_limit: {fans_limit}",
                f"[collect_keyword_based_data] - follows_limit: {follows_limit}",
            ):
                utils.logger.info(message)

            # Publish run-wide settings through the shared config module.
            config.SAVE_DATA_OPTION = "db"
            config.CRAWLER_MAX_NOTES_COUNT = posts_limit
            config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES = comments_limit
            config.MAX_FANS_COUNT = fans_limit
            config.MAX_FOLLOWER_COUNT = follows_limit
            config.PLATFORM = platform_code
            config.CRAWLER_TYPE = "search"

            utils.logger.info("[collect_keyword_based_data] 配置已更新")

            if not await self.data_manager.ensure_connected():
                raise Exception("数据库连接失败")

            # Non-string input is treated as a single ready-made keyword.
            if isinstance(keywords, str):
                keyword_list = [k.strip() for k in keywords.split(",") if k.strip()]
            else:
                keyword_list = [keywords]

            total_keywords = len(keyword_list)
            utils.logger.info(f"[collect_keyword_based_data] 共有 {total_keywords} 个关键词需要处理")

            for keyword_idx, keyword in enumerate(keyword_list):
                if status_callback:
                    status_callback(f"正在搜索关键词 '{keyword}' 相关主题...", keyword_idx / total_keywords)

                try:
                    utils.logger.info(f"[collect_keyword_based_data] 开始处理关键词: {keyword}")

                    await self._create_and_setup_crawler(
                        platform=platform_code,
                        task_type="search",
                        KEYWORDS=keyword,
                        base_limit=base_limit,
                        MAX_COMMENTS_COUNT=comments_limit
                    )

                    if status_callback:
                        status_callback(f"关键词 '{keyword}' 数据采集完成", (keyword_idx + 1) / total_keywords)

                except Exception as e:
                    utils.logger.error(f"[collect_keyword_based_data] 处理关键词 '{keyword}' 时出错: {str(e)}")
                    continue

            if status_callback:
                status_callback("数据采集完成！", 1.0)

        except Exception as e:
            utils.logger.error(f"[collect_keyword_based_data] 数据采集失败: {str(e)}")
            raise
        finally:
            await self._cleanup_crawler()

    async def collect_kol_fans_data(self, platform: str, user_ids: List[str],
                                  base_limit: int = 100,
                                  posts_limit: int = 20,
                                  comments_limit: int = 50,
                                  fans_limit: int = 100,
                                  follows_limit: int = 100,
                                  status_callback: Optional[callable] = None) -> None:
        """Scenario B: collect data about a KOL's fans.

        For each KOL user id: fetch up to base_limit fans, then for each fan
        collect profile, notes (posts_limit), comments (comments_limit) and
        follow list (follows_limit). Per-fan / per-user failures are logged
        and skipped so one bad account does not abort the run.

        Note: fans_limit is accepted but unused in this scenario — kept for
        signature compatibility with the sibling collect_* methods.

        Raises:
            Exception: wraps any fatal error as "粉丝数据采集失败: ...".
        """
        try:
            platform_code = self.PLATFORM_MAP.get(platform)
            if not platform_code:
                # BUG FIX: a None platform code used to flow into every crawl
                # call; fail fast like the other collect_* methods.
                raise ValueError(f"不支持的平台类型: {platform}")
            total_users = len(user_ids)

            for user_idx, user_id in enumerate(user_ids):
                current_progress = user_idx / total_users
                if status_callback:
                    status_callback(f"正在获取用户 {user_id} 的粉丝列表...", current_progress)

                try:
                    # 1. Crawl the KOL's fan list, capped at base_limit.
                    await self._create_and_setup_crawler(
                        platform=platform_code,
                        task_type="fans",
                        DY_CREATOR_ID_LIST=[user_id],
                        MAX_FANS_COUNT=base_limit
                    )

                    # 2. Read back the stored fan ids.
                    #    Guard: the query may return None as well as [].
                    fans_data = await self.data_manager.get_fans_ids(
                        platform=platform_code,
                        user_id=user_id,
                        limit=base_limit
                    ) or []
                    total_fans = len(fans_data)

                    for fan_idx, (fan_id, fan_sec_uid) in enumerate(fans_data):
                        fan_progress = current_progress + (fan_idx / total_fans) * (1 / total_users)

                        try:
                            # 3. Fan profile.
                            if status_callback:
                                status_callback(f"正在采集粉丝 {fan_id} 的详细信息...", fan_progress)

                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="creator",
                                DY_CREATOR_ID_LIST=[fan_sec_uid]
                            )

                            # 4. Fan's notes, capped at posts_limit.
                            if status_callback:
                                status_callback(f"正在采集粉丝 {fan_id} 的主题数据...", fan_progress)

                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="creator_notes",
                                DY_CREATOR_ID_LIST=[fan_sec_uid],
                                posts_limit=posts_limit
                            )

                            # 5. Fan's comments, capped at comments_limit.
                            if status_callback:
                                status_callback(f"正在采集粉丝 {fan_id} 的评论数据...", fan_progress)

                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="creator_comments",
                                DY_CREATOR_ID_LIST=[fan_sec_uid],
                                MAX_COMMENTS_COUNT=comments_limit
                            )

                            # 6. Fan's follow list, capped at follows_limit.
                            if status_callback:
                                status_callback(f"正在采集粉丝 {fan_id} 的关注数据...", fan_progress)

                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="followers",
                                DY_CREATOR_ID_LIST=[fan_sec_uid],
                                MAX_FOLLOWER_COUNT=follows_limit
                            )

                        except Exception as e:
                            # Consistency fix: log via utils.logger, not print().
                            utils.logger.error(f"采集粉丝 {fan_id} 数据时出错: {str(e)}")
                            continue

                except Exception as e:
                    utils.logger.error(f"处理用户 {user_id} 时出错: {str(e)}")
                    continue

            if status_callback:
                status_callback("数据采集完成！", 1.0)

        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"粉丝数据采集失败: {str(e)}") from e
        finally:
            await self._cleanup_crawler()

    async def collect_topic_users_data(self, platform: str, topics: str,
                                     base_limit: int = 100,
                                     posts_limit: int = 20,
                                     comments_limit: int = 50,
                                     fans_limit: int = 100,
                                     follows_limit: int = 100,
                                     status_callback: Optional[callable] = None) -> None:
        """Scenario C: collect data about topic participants.

        Per topic: crawl up to base_limit notes, then for every posting user
        collect profile, notes (posts_limit), comments (comments_limit) and
        follow list (follows_limit). Per-topic / per-user failures are logged
        and skipped.

        Note: fans_limit is accepted but unused in this scenario — kept for
        signature compatibility with the sibling collect_* methods.

        Raises:
            Exception: wraps any fatal error as "话题数据采集失败: ...".
        """
        try:
            platform_code = self.PLATFORM_MAP.get(platform)
            if not platform_code:
                # BUG FIX: fail fast on an unknown platform instead of passing
                # None into every crawl call.
                raise ValueError(f"不支持的平台类型: {platform}")
            topic_list = [t.strip() for t in topics.split(",") if t.strip()]
            total_topics = len(topic_list)

            for topic_idx, topic in enumerate(topic_list):
                current_progress = topic_idx / total_topics
                if status_callback:
                    status_callback(f"正在获取话题 '{topic}' 下的主题列表...", current_progress)

                try:
                    # 1. Notes under this topic, capped at base_limit.
                    await self._create_and_setup_crawler(
                        platform=platform_code,
                        task_type="topic",
                        KEYWORDS=topic,
                        CRAWLER_MAX_NOTES_COUNT=base_limit,
                        MAX_COMMENTS_COUNT=comments_limit
                    )

                    # 2. Posting users, capped at base_limit.
                    #    Guard: the query may return None as well as [].
                    user_data = await self.data_manager.get_unique_user_ids_and_sec_uids(
                        platform=platform_code,
                        limit=base_limit
                    ) or []
                    total_users = len(user_data)

                    for user_idx, (user_id, sec_uid) in enumerate(user_data):
                        user_progress = current_progress + (user_idx / total_users) * (1 / total_topics)

                        try:
                            # 3. User profile.
                            if status_callback:
                                status_callback(f"正在采集用户 {user_id} 的详细信息...", user_progress)

                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="creator",
                                DY_CREATOR_ID_LIST=[sec_uid]
                            )

                            # 4. User notes, capped at posts_limit.
                            if status_callback:
                                status_callback(f"正在采集用户 {user_id} 的主题数据...", user_progress)

                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="creator_notes",
                                DY_CREATOR_ID_LIST=[sec_uid],
                                posts_limit=posts_limit
                            )

                            # 5. User comments, capped at comments_limit.
                            if status_callback:
                                status_callback(f"正在采集用户 {user_id} 的评论数据...", user_progress)

                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="creator_comments",
                                DY_CREATOR_ID_LIST=[sec_uid],
                                MAX_COMMENTS_COUNT=comments_limit
                            )

                            # 6. User follow list, capped at follows_limit.
                            if status_callback:
                                status_callback(f"正在采集用户 {user_id} 的关注数据...", user_progress)

                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="followers",
                                DY_CREATOR_ID_LIST=[sec_uid],
                                MAX_FOLLOWER_COUNT=follows_limit
                            )

                        except Exception as e:
                            # Consistency fix: log via utils.logger, not print().
                            utils.logger.error(f"采集用户 {user_id} 数据时出错: {str(e)}")
                            continue

                except Exception as e:
                    utils.logger.error(f"处理话题 '{topic}' 时出错: {str(e)}")
                    continue

            if status_callback:
                status_callback("数据采集完成！", 1.0)

        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"话题数据采集失败: {str(e)}") from e
        finally:
            await self._cleanup_crawler()
    
    async def collect_keyword_posts_data(self, platform: str, keywords: str,
                                      posts_limit: int = 10,
                                      comments_limit: int = 20,
                                      status_callback: Optional[callable] = None) -> None:
        """场景1：基于关键词的帖子数据采集.

        Runs one "search" crawl per comma-separated keyword, collecting the
        matching posts, their comments and the commenting users' basic info.

        Args:
            platform: 平台名称
            keywords: 搜索关键词，多个关键词用逗号分隔
            posts_limit: 每个关键词获取的帖子数量限制
            comments_limit: 每个帖子获取的评论数量限制
            status_callback: 状态回调函数
        """
        try:
            platform_code = self.PLATFORM_MAP.get(platform)
            if platform_code is None:
                raise ValueError(f"不支持的平台类型: {platform}")

            utils.logger.info(f"[collect_keyword_posts_data] 开始采集平台 {platform_code} 的帖子数据")
            utils.logger.info(f"[collect_keyword_posts_data] 关键词: {keywords}")

            keyword_list = [kw.strip() for kw in keywords.split(",") if kw.strip()]
            total = len(keyword_list)

            for idx, keyword in enumerate(keyword_list):
                if status_callback:
                    status_callback(f"正在搜索关键词 '{keyword}' 相关帖子...", idx / total)

                try:
                    # Search mode: posts + comments + commenter basics.
                    await self._create_and_setup_crawler(
                        platform=platform_code,
                        task_type="search",
                        KEYWORDS=keyword,
                        base_limit=posts_limit,
                        MAX_COMMENTS_COUNT=comments_limit
                    )

                    if status_callback:
                        status_callback(f"关键词 '{keyword}' 数据采集完成", (idx + 1) / total)

                except Exception as e:
                    utils.logger.error(f"[collect_keyword_posts_data] 处理关键词 '{keyword}' 时出错: {str(e)}")
                    continue

            if status_callback:
                status_callback("帖子数据采集完成！", 1.0)

        except Exception as e:
            utils.logger.error(f"[collect_keyword_posts_data] 数据采集失败: {str(e)}")
            raise
        finally:
            await self._cleanup_crawler()

    async def collect_user_posts_data(self, platform: str, user_ids: List[str],
                                    posts_limit: int = 20,
                                    comments_limit: int = 50,
                                    status_callback: Optional[callable] = None) -> None:
        """场景2：基于用户ID的帖子数据采集.

        Crawls each listed user's published posts and those posts' comments.

        Args:
            platform: 平台名称
            user_ids: 用户ID列表
            posts_limit: 每个用户获取的帖子数量限制
            comments_limit: 每个帖子获取的评论数量限制
            status_callback: 状态回调函数
        """
        try:
            platform_code = self.PLATFORM_MAP.get(platform)
            if platform_code is None:
                raise ValueError(f"不支持的平台类型: {platform}")

            utils.logger.info(f"[collect_user_posts_data] 开始采集平台 {platform_code} 的用户帖子数据")
            utils.logger.info(f"[collect_user_posts_data] 用户数量: {len(user_ids)}")

            total = len(user_ids)
            for idx, uid in enumerate(user_ids):
                if status_callback:
                    status_callback(f"正在获取用户 {uid} 的帖子数据...", idx / total)

                try:
                    # Creator mode: this user's posts plus their comments.
                    await self._create_and_setup_crawler(
                        platform=platform_code,
                        task_type="creator",
                        DY_CREATOR_ID_LIST=[uid],
                        posts_limit=posts_limit,
                        MAX_COMMENTS_COUNT=comments_limit
                    )

                    if status_callback:
                        status_callback(f"用户 {uid} 的数据采集完成", (idx + 1) / total)

                except Exception as e:
                    utils.logger.error(f"[collect_user_posts_data] 处理用户 {uid} 时出错: {str(e)}")
                    continue

            if status_callback:
                status_callback("用户帖子数据采集完成！", 1.0)

        except Exception as e:
            utils.logger.error(f"[collect_user_posts_data] 数据采集失败: {str(e)}")
            raise
        finally:
            await self._cleanup_crawler()
    
    async def collect_search_data(self, platform: str, keywords: str,
                                base_limit: int = 10,
                                comments_limit: int = 20,
                                status_callback: Optional[callable] = None) -> None:
        """场景1：基于关键词的搜索数据采集.

        For each comma-separated keyword, runs a "search" crawl that gathers
        matching posts, their comments and commenting users' basic info.

        Args:
            platform: 平台名称（抖音、微博、小红书）
            keywords: 搜索关键词，多个关键词用逗号分隔
            base_limit: 每个关键词获取的帖子数量限制
            comments_limit: 每个帖子获取的评论数量限制
            status_callback: 状态回调函数
        """
        try:
            platform_code = self.PLATFORM_MAP.get(platform)
            if platform_code is None:
                raise ValueError(f"不支持的平台类型: {platform}")

            for line in (
                f"[collect_search_data] 开始采集平台 {platform_code} 的数据",
                f"[collect_search_data] 关键词: {keywords}",
                "[collect_search_data] 配置信息:",
                f"[collect_search_data] - base_limit: {base_limit}",
                f"[collect_search_data] - comments_limit: {comments_limit}",
            ):
                utils.logger.info(line)

            keyword_list = [kw.strip() for kw in keywords.split(",") if kw.strip()]
            total = len(keyword_list)

            for idx, keyword in enumerate(keyword_list):
                if status_callback:
                    status_callback(f"正在搜索关键词 '{keyword}' 相关帖子...", idx / total)

                try:
                    # Search mode crawl for this keyword.
                    await self._create_and_setup_crawler(
                        platform=platform_code,
                        task_type="search",
                        KEYWORDS=keyword,
                        base_limit=base_limit,
                        MAX_COMMENTS_COUNT=comments_limit
                    )

                    if status_callback:
                        status_callback(f"关键词 '{keyword}' 数据采集完成", (idx + 1) / total)

                except Exception as e:
                    utils.logger.error(f"[collect_search_data] 处理关键词 '{keyword}' 时出错: {str(e)}")
                    continue

            if status_callback:
                status_callback("数据采集完成！", 1.0)

        except Exception as e:
            utils.logger.error(f"[collect_search_data] 数据采集失败: {str(e)}")
            raise
        finally:
            await self._cleanup_crawler()

    async def collect_creator_data(self, platform: str, user_ids: List[str],
                                posts_limit: int = 20,
                                comments_limit: int = 50,
                                status_callback: Optional[callable] = None) -> None:
        """场景2：基于用户ID的创作者数据采集.

        Crawls each listed creator's published posts and those posts' comments.

        Args:
            platform: 平台名称（抖音、微博、小红书）
            user_ids: 用户ID列表
            posts_limit: 每个用户获取的帖子数量限制
            comments_limit: 每个帖子获取的评论数量限制
            status_callback: 状态回调函数
        """
        try:
            platform_code = self.PLATFORM_MAP.get(platform)
            if platform_code is None:
                raise ValueError(f"不支持的平台类型: {platform}")

            utils.logger.info(f"[collect_creator_data] 开始采集平台 {platform_code} 的用户数据")
            utils.logger.info(f"[collect_creator_data] 用户数量: {len(user_ids)}")

            total = len(user_ids)
            for idx, uid in enumerate(user_ids):
                if status_callback:
                    status_callback(f"正在获取用户 {uid} 的帖子数据...", idx / total)

                try:
                    # Creator mode: this user's posts plus their comments.
                    await self._create_and_setup_crawler(
                        platform=platform_code,
                        task_type="creator",
                        DY_CREATOR_ID_LIST=[uid],
                        posts_limit=posts_limit,
                        MAX_COMMENTS_COUNT=comments_limit
                    )

                    if status_callback:
                        status_callback(f"用户 {uid} 的数据采集完成", (idx + 1) / total)

                except Exception as e:
                    utils.logger.error(f"[collect_creator_data] 处理用户 {uid} 时出错: {str(e)}")
                    continue

            if status_callback:
                status_callback("用户数据采集完成！", 1.0)

        except Exception as e:
            utils.logger.error(f"[collect_creator_data] 数据采集失败: {str(e)}")
            raise
        finally:
            await self._cleanup_crawler()

    async def collect_fans_data(self, platform: str, user_ids: List[str],
                             base_limit: int = 100,
                             posts_limit: int = 20,
                             comments_limit: int = 50,
                             status_callback: Optional[callable] = None) -> None:
        """Scenario 3: collect fan data for the given users.

        Fetches:
        1. The fan list of each specified user.
        2. Posts published by those fans.
        3. The comments attached to those posts.

        Args:
            platform: Platform display name (one of 抖音/微博/小红书).
            user_ids: List of user IDs whose fans are harvested.
            base_limit: Max number of fans fetched per user.
            posts_limit: Max number of posts fetched per fan.
            comments_limit: Max number of comments fetched per post.
            status_callback: Optional callable(message, progress) used for
                UI progress reporting; progress runs from 0.0 to 1.0.
        """
        def notify(message: str, progress: float) -> None:
            # Forward progress to the UI only when a callback was supplied.
            if status_callback:
                status_callback(message, progress)

        try:
            platform_code = self.PLATFORM_MAP.get(platform)
            if not platform_code:
                raise ValueError(f"不支持的平台类型: {platform}")

            utils.logger.info(f"[collect_fans_data] 开始采集平台 {platform_code} 的粉丝数据")
            utils.logger.info(f"[collect_fans_data] 用户数量: {len(user_ids)}")

            total_users = len(user_ids)
            for user_idx, user_id in enumerate(user_ids):
                base_progress = user_idx / total_users
                notify(f"正在获取用户 {user_id} 的粉丝数据...", base_progress)

                try:
                    # Step 1: run the crawler in "fans" mode to harvest the fan list.
                    await self._create_and_setup_crawler(
                        platform=platform_code,
                        task_type="fans",
                        DY_CREATOR_ID_LIST=[user_id],
                        MAX_FANS_COUNT=base_limit
                    )

                    # Step 2: read the harvested fan IDs back from storage.
                    fans_data = await self.data_manager.get_fans_ids(
                        platform=platform_code,
                        user_id=user_id,
                        limit=base_limit
                    )

                    # Step 3: crawl each fan's posts/comments; every fan advances
                    # the bar within this user's 1/total_users progress share.
                    total_fans = len(fans_data)
                    for fan_idx, (fan_id, fan_sec_uid) in enumerate(fans_data):
                        notify(
                            f"正在获取粉丝 {fan_id} 的帖子数据...",
                            base_progress + (fan_idx / total_fans) * (1 / total_users),
                        )

                        try:
                            await self._create_and_setup_crawler(
                                platform=platform_code,
                                task_type="creator",
                                DY_CREATOR_ID_LIST=[fan_sec_uid],
                                posts_limit=posts_limit,
                                MAX_COMMENTS_COUNT=comments_limit
                            )
                        except Exception as e:
                            # One broken fan must not stop the rest of the fans.
                            utils.logger.error(f"[collect_fans_data] 处理粉丝 {fan_id} 时出错: {str(e)}")
                            continue

                except Exception as e:
                    # One broken user must not stop the rest of the batch.
                    utils.logger.error(f"[collect_fans_data] 处理用户 {user_id} 时出错: {str(e)}")
                    continue

            notify("粉丝数据采集完成！", 1.0)

        except Exception as e:
            utils.logger.error(f"[collect_fans_data] 数据采集失败: {str(e)}")
            raise
        finally:
            await self._cleanup_crawler()
    
    async def collect_simple_search_data(self, platform: str, keywords: str, posts_limit: int, comments_limit: int, status_callback=None):
        """Collect keyword-search data in a single crawler pass.

        Args:
            platform: Platform display name (one of 抖音/微博/小红书).
            keywords: Search keywords to crawl for.
            posts_limit: Max number of notes/posts to fetch.
            comments_limit: Max number of comments to fetch per post.
            status_callback: Accepted for signature parity with the other
                collectors; not invoked by this method.
        """
        try:
            # Resolve the display name to the internal platform code.
            platform_code = self.PLATFORM_MAP.get(platform)
            if not platform_code:
                raise ValueError(f"不支持的平台类型: {platform}")

            utils.logger.info(f"[collect_simple_search_data] 关键词: {keywords}")

            # Mirror the keyword/type into the module-level config as well —
            # presumably some crawler code reads these globals directly
            # (TODO confirm); the values are also passed explicitly below.
            config.KEYWORDS = keywords
            config.CRAWLER_TYPE = "search"

            crawler_kwargs = {
                "platform": platform_code,
                "task_type": "search",
                "KEYWORDS": keywords,
                "max_notes_count": posts_limit,
                "max_comments_count": comments_limit,
            }
            await self._create_and_setup_crawler(**crawler_kwargs)

        except Exception as e:
            utils.logger.error(f"[collect_simple_search_data] 处理关键词 '{keywords}' 时出错: {str(e)}")
            raise
        finally:
            await self._cleanup_crawler()
    
    async def __aenter__(self):
        """Enter the async context: ensure database connections are set up."""
        await self.initialize()
        return self
    
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Leave the async context, releasing crawler/browser/DB resources.

        Implicitly returns None, so exceptions raised inside the
        ``async with`` body are not suppressed.
        """
        await self.cleanup()