# -*- coding: utf-8 -*-
"""
详情页数据获取模块
负责获取详情页数据并解析存储
使用requests而不是session
"""

import requests
import time
import random
import json
import logging
from typing import Dict, Optional, List
from urllib.parse import urljoin
from concurrent.futures import ThreadPoolExecutor, as_completed
from modules.common.redis_manager import RedisManager
from modules.common.proxy_manager import ProxyManager, RequestHelper
from modules.common.data_parser import HouzzDataParser, DataValidator
from modules.common.logger_manager import get_logger
from modules.common.user_agent_manager import UserAgentManager
from config.houzz_settings import DETAIL_SCRAPER_CONFIG
from config.houzz_settings import CATEGORIES

# Module-level logger routed through the project's logging facility;
# logs under the 'detail' channel for the 'houzz' spider.
logger = get_logger('detail', spider_name='houzz')


class DetailPageScraper:
    """详情页爬虫"""
    
    def __init__(self, redis_manager: RedisManager, proxy_manager: ProxyManager):
        """Set up the detail-page scraper.

        Args:
            redis_manager: shared Redis task/result store.
            proxy_manager: proxy pool used for outbound requests.
        """
        self.redis_manager = redis_manager
        self.proxy_manager = proxy_manager
        # Count of back-to-back request failures (circuit-breaker input).
        self.consecutive_errors = 0
        # Rotating User-Agent source for request headers.
        self.user_agent_manager = UserAgentManager()
        # Thin wrapper that issues requests through the proxy pool.
        self.request_helper = RequestHelper(proxy_manager)
        # category URL -> human-readable category name.
        self.category_mapping = self._build_category_mapping()
        
    def _initialize_csrf_token(self) -> Optional[str]:
        """Fetch the Houzz homepage and extract the embedded CSRF token.

        Makes up to two attempts. Bug fix: a failed request (None response)
        previously returned None immediately, skipping the retry entirely;
        it now falls through so the second attempt actually runs.

        Returns:
            The token string on success, otherwise the literal "1" as a
            degraded fallback so downstream requests still send a header.
        """
        import re  # hoisted out of the loop; pattern compiled once

        token_re = re.compile(r'"csrfToken":"([^"]+)"')
        for attempt in range(2):  # one retry
            try:
                response = self.request_helper.make_request("https://www.houzz.com", timeout=45)
                if response:
                    match = token_re.search(response.text)
                    if match:
                        return match.group(1)
            except Exception as e:
                logger.error(f"初始化CSRF令牌失败 (尝试 {attempt + 1}/2): {e}")
            # Back off briefly before the second (and last) attempt.
            if attempt == 0:
                time.sleep(3)

        logger.error("❌ 无法获取CSRF令牌")
        return "1"
    
    def _build_category_mapping(self) -> Dict[str, str]:
        """Build a lookup from category URL to category display name.

        An explicitly configured name wins; otherwise the name is derived
        from the URL path. Entries that resolve to "unknown" are skipped.
        """
        mapping: Dict[str, str] = {}
        for entry in CATEGORIES:
            if not isinstance(entry, dict) or 'url' not in entry:
                continue
            url = entry['url']
            configured = entry.get('name')
            if configured:
                mapping[url] = configured
                continue
            # No configured name — fall back to deriving one from the URL.
            derived = self._extract_name_from_url(url)
            if derived != "unknown":
                mapping[url] = derived
        logger.info(f"构建了 {len(mapping)} 个类目映射")
        return mapping
    
    def _extract_name_from_url(self, category_url: str) -> str:
        """从URL中提取类目名称（内部辅助方法）"""
        try:
            # 使用正则表达式提取professionals/后面的类目名称
            # 例如: https://www.houzz.com/professionals/outdoor-lighting-and-audio-visual-systems/probr0-bo~t_11836
            # 应该提取: outdoor-lighting-and-audio-visual-systems
            import re
            pattern = r'/professionals/([^/]+)(?:/|$)'
            match = re.search(pattern, category_url)
            if match:
                category_name = match.group(1)
                # 移除可能的查询参数
                if '?' in category_name:
                    category_name = category_name.split('?')[0]
                return category_name
            
            # 如果正则匹配失败，使用路径分割作为备用
            parts = category_url.split('/')
            if len(parts) > 4 and 'professionals' in parts:
                professionals_index = parts.index('professionals')
                if professionals_index + 1 < len(parts):
                    category_name = parts[professionals_index + 1]
                    # 移除可能的查询参数
                    if '?' in category_name:
                        category_name = category_name.split('?')[0]
                    return category_name
            return "unknown"
        except Exception as e:
            logger.warning(f"从URL提取类目名称失败: {category_url}, 错误: {e}")
            return "unknown"
    
    def _extract_category_name(self, category_url: str) -> str:
        """从类目URL中提取类目名称
        
        优先级：
        1. 从配置映射中获取（如果配置了name）
        2. 从URL中提取
        """
        try:
            # 首先尝试从配置映射中获取
            if category_url in self.category_mapping:
                return self.category_mapping[category_url]
            
            # 如果配置中没有，从URL中提取类目名称
            return self._extract_name_from_url(category_url)
            
        except Exception as e:
            logger.warning(f"提取类目名称失败: {category_url}, 错误: {e}")
            return "unknown"
    
    def _fetch_url(self, url: str, params: Optional[Dict] = None, extra_headers: Optional[Dict] = None, csrf_token: Optional[str] = None, retry_count: int = 0) -> Optional[requests.Response]:
        """GET a URL with browser-like headers, optional proxy and retries.

        Args:
            url: target URL; percent-escaped before sending.
            params: optional query parameters.
            extra_headers: merged on top of the default headers.
            csrf_token: X-CSRF-Token header value (None drops the header).
            retry_count: number of extra attempts after the first.

        Returns:
            The Response on success; the sentinel string 'not_found' for a
            404; or None when all attempts are exhausted.

        Raises:
            Exception: when MAX_CONSECUTIVE_ERRORS consecutive failures
                accumulate (the counter is reset before raising so a later
                call can proceed).
        """
        import urllib.parse

        safe_url = urllib.parse.quote(url, safe='/:?=&%')

        # Static cookies captured from a browser session; requests without
        # them are rejected. NOTE(review): these values will expire at some
        # point — confirm they are refreshed elsewhere.
        cookies = {
            'v': '1760611674_0a85a737-5c26-4f67-ad2d-fe17efb76643_9c6bbfc39c768aeb27d1eb7664ccadaa',
            'hzv': '1760611674_0a85a737-5c26-4f67-ad2d-fe17efb76643_9c6bbfc39c768aeb27d1eb7664ccadaa',
            'vct': 'en-US-SBxazfBo4R1azfBo4h1azfBoCR9azfBo8B9azfBo',
            'FPAU': '1.1.1274685418.1760611678',
            'jdv': '',
            'prf': 'prodirDistFil%7C%7D',
            'cto_bundle': 'gkHC-V9uNCUyQmFiNGRqcGtKZmNQNnNnOEdzamVtJTJGYXR2d3l2V0d1NkFLWWRUcmlTcHF4JTJCJTJGJTJCeU5wZGFtR2dMbTJnOHR4empVZHIzaHdOOGdnMDFQQ0NDTW1VbHhUVml0MFU2QTJCT3IlMkZLQXk1NCUyQm5Eb1BaOWgzd0d0UGJ4OTV5cmhlJTJGMzlxbVRWTGs2NnVxaHVFNTBLOHdnTzZXVTJ5U1ZEaXQ2bTZNZ1o4b3pmRm5adktvMng2eExyaGVMQmZMZGhIcllk',
            'documentHeight': '742',
            'fstest': '8',

            'ServerAwinChannelCookie': 'other',
            'awin_source': 'pro_solutions_upgrade_header',
            'ABTastySession': 'mrasn=&lp=https%253A%252F%252Fwww.houzz.com%252Ffor-pros%252Fsoftware-construction',
            'ABTasty': 'uid=qxd08gp0m3w6h230&fst=1760625248754&pst=1761236768146&cst=1761479715145&ns=3&pvt=16&pvis=10&th=',
            'crto_mapped_user_id': 'u9qREl8wRnlvNlY1SmlJWG52eVFqJTJCZHIlMkYyQ28lMkJuMXVzdmEyNmhuWVlFT1FoM2U0JTNE',
            'hzd': 'fd165fb3-aa6c-4bf7-ba79-5a77c5d7dd3b%3A%3A%3Asigned_in_hp_ideas%3A%3ABath',
        }

        # Browser-like headers with a randomized User-Agent per call.
        headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'zh-CN,zh;q=0.9',
            'referer': 'https://www.houzz.com/',
            "User-Agent": self.user_agent_manager.get_random_user_agent(),
            "X-CSRF-Token": csrf_token,
            "x-requested-with": "XMLHttpRequest"
        }

        if extra_headers:
            headers.update(extra_headers)

        for attempt in range(retry_count + 1):
            try:
                # Rotate to a working proxy when proxying is enabled.
                proxy = self.proxy_manager.get_working_proxy() if self.proxy_manager.is_proxy_enabled() else None
                if not proxy:
                    logger.warning("详情页，未使用代理情况！")

                response = requests.get(
                    safe_url,
                    params=params,
                    headers=headers,
                    proxies=proxy,
                    timeout=DETAIL_SCRAPER_CONFIG['REQUEST_TIMEOUT'],
                    cookies=cookies
                )

                response.raise_for_status()
                self.consecutive_errors = 0  # success resets the breaker
                return response

            except requests.exceptions.HTTPError as e:
                if e.response.status_code == 429:
                    # Rate limited: cool down, then spend the next attempt.
                    cool_down_time = random.uniform(30, 60)
                    logger.warning(f"429 请求过多，等待 {cool_down_time:.2f}秒")
                    time.sleep(cool_down_time)
                    continue
                elif e.response.status_code == 404:
                    logger.warning(f"页面不存在 (404): {url}")
                    return 'not_found'

                logger.warning(f"HTTP错误 {url} (尝试 {attempt + 1}): {e}")

            except requests.exceptions.RequestException as e:
                logger.warning(f"请求失败 {url} (尝试 {attempt + 1}): {e}")

            # Bug fix: this bound previously compared attempt against the
            # global DETAIL_SCRAPER_CONFIG['RETRY_COUNT'] + 2 instead of this
            # call's own retry_count (the actual loop bound), which made the
            # error-counting branch below effectively unreachable and left
            # the consecutive-error circuit breaker dead.
            if attempt < retry_count:
                time.sleep(2 ** attempt)  # exponential backoff
            else:
                self.consecutive_errors += 1
                logger.error(f"URL获取失败，连续错误次数: {self.consecutive_errors}")

                if self.consecutive_errors >= DETAIL_SCRAPER_CONFIG['MAX_CONSECUTIVE_ERRORS']:
                    logger.error(f"达到最大连续错误次数限制: {self.consecutive_errors}")
                    # Reset so the process can resume after the caller recovers.
                    self.consecutive_errors = 0
                    raise Exception("达到最大连续错误次数限制")

        return None
    
    def _fetch_api_data(self, api_type: str, user_id: int, referer_url: str, csrf_token: str = None) -> Optional[int]:
        """Query a Houzz AJAX endpoint and count the items it returns.

        api_type must be "projects" or "videos"; anything else yields None.
        Returns the item count, or None on request/parse failure.
        """
        import urllib.parse

        safe_referer = urllib.parse.quote(referer_url, safe='/:?=&%')

        # Pick the endpoint and query parameters for the requested API.
        if api_type == "projects":
            api_url = "https://www.houzz.com/j/ajax/profileProjectsAjax"
            params = {"userId": user_id, "fromItem": 0, "itemsPerPage": 5000}
        elif api_type == "videos":
            api_url = "https://www.houzz.com/j/ajax/proAllVideos"
            params = {"userId": str(user_id), "fromItem": 0, "itemsPerPage": 500}
        else:
            return None

        try:
            response = self._fetch_url(api_url, params, {'Referer': safe_referer}, csrf_token, retry_count=3)
            if not response or response == 'not_found':
                return None

            payload = response.json()
            # Both APIs nest their store data under the same ctx/data path.
            stores = payload.get("ctx", {}).get("data", {}).get("stores", {}).get("data", {})
            if api_type == "projects":
                return len(stores.get("ProjectStore", {}).get('data', {}))
            return len(stores.get("SpaceStore", {}).get("data", {}))

        except (json.JSONDecodeError, KeyError) as e:
            logger.error(f"解析API数据失败 {api_type}: {e}")
            return None
        except Exception as e:
            logger.error(f"获取API数据失败 {api_type}: {e}")
            return None
    
    def _get_media_counts_parallel(self, user_id: int, company_name: str, profile_url: str, csrf_token: str = None) -> Dict[str, Optional[int]]:
        """Fetch project and video counts concurrently (two worker threads).

        Failures are logged but never propagated, so the caller's main flow
        continues with None for any count that could not be fetched.
        """
        counts: Dict[str, Optional[int]] = {"projects": None, "videos": None}

        with ThreadPoolExecutor(max_workers=2, thread_name_prefix='ApiFetcher') as pool:
            pending = {
                pool.submit(self._fetch_api_data, kind, user_id, profile_url, csrf_token): kind
                for kind in ("projects", "videos")
            }

            for done in as_completed(pending):
                kind = pending[done]
                try:
                    counts[kind] = done.result()
                except Exception as exc:
                    # Log and swallow so one failed API doesn't break the flow.
                    logger.warning(f"获取 '{kind}' 数量失败 '{company_name}': {exc}")
                    # Clear the breaker so one bad call doesn't poison others.
                    self.consecutive_errors = 0

        return counts
    
    def scrape_detail_page(self, detail_url: str, csrf_token: str = None) -> Dict:
        """Fetch one detail page, then parse, validate and enrich its data.

        Returns:
            {'status': 'success', 'data': ...} on success, otherwise
            {'status': 'failed', 'reason': ...}.
        """
        try:
            # Random politeness delay before each fetch.
            time.sleep(random.uniform(DETAIL_SCRAPER_CONFIG['MIN_DELAY'], DETAIL_SCRAPER_CONFIG['MAX_DELAY']))

            response = self._fetch_url(detail_url, csrf_token=csrf_token, retry_count=DETAIL_SCRAPER_CONFIG['RETRY_COUNT'])

            if response == 'not_found':
                logger.warning(f"详情页返回404: {detail_url}")
                return {'status': 'failed', 'reason': '404_not_found'}
            if not response:
                logger.error(f"获取详情页失败: {detail_url}")
                return {'status': 'failed', 'reason': 'request_failed'}

            # Parse the page HTML into a structured record.
            scraped_data = HouzzDataParser(response.text, detail_url).parse_detail_page()
            if not scraped_data:
                return {'status': 'failed', 'reason': 'parse_failed'}

            if not DataValidator.validate_detail_data(scraped_data):
                logger.error(f"详情页数据验证失败: {detail_url}")
                return {'status': 'failed', 'reason': 'validation_failed'}

            # Enrich with project/video counts when the IDs are available.
            user_id = scraped_data.get("pro_user_id")
            company_name = scraped_data.get("company_name")
            if user_id and company_name:
                counts = self._get_media_counts_parallel(user_id, company_name, detail_url, csrf_token)
                for key in ("projects", "videos"):
                    if counts.get(key) is not None:
                        scraped_data[key] = counts[key]

            scraped_data = DataValidator.clean_data(scraped_data)

            logger.info(f"成功爬取详情页: {detail_url}")
            return {'status': 'success', 'data': scraped_data}

        except Exception as e:
            logger.error(f"爬取详情页失败 {detail_url}: {e}")
            return {'status': 'failed', 'reason': f'exception: {str(e)}'}
    
    def process_detail_task(self, task: Dict) -> bool:
        """Process one detail-page task end to end.

        Flow: skip if the URL+category+page combination is already complete;
        reuse stored data when the URL was scraped before; otherwise scrape,
        attach category info, and persist. Failures are requeued within the
        retry budget, then moved to the failed queue.

        Returns:
            True when the task ended in a saved/duplicate-success state,
            False on any failure.
        """
        try:
            detail_url = task['detail_url']
            category_url = task['category_url']
            category_page = task.get('category_page')
            retry_count = task.get('retry_count', 0)

            logger.info(f"处理详情页任务: {detail_url} (重试次数: {retry_count}, 类目页码: {category_page})")

            # Already completed for this URL+category+page combination.
            if self.redis_manager.is_data_completed(detail_url, category_url, category_page):
                logger.info(f"该URL+类目+页码组合已完成，跳过: {detail_url} (类目: {category_url}, 页码: {category_page})")
                self.redis_manager.increment_stats('success_detail_tasks', 1)
                self.redis_manager.update_stats('last_activity_time', int(time.time()))
                return True

            # URL scraped before: try to reuse the stored data instead of
            # re-fetching. None means no usable data — fall through and scrape.
            if self.redis_manager.is_url_scraped(detail_url):
                reused = self._save_reused_record(detail_url, category_url, category_page)
                if reused is not None:
                    return reused

            csrf_token = self._initialize_csrf_token()
            result = self.scrape_detail_page(detail_url, csrf_token)

            if result['status'] == 'success':
                scraped_data = result.get('data', {})

                # Attach the category context for this task.
                scraped_data['category_name'] = self._extract_category_name(category_url)
                scraped_data['category_url'] = category_url
                scraped_data['category_page'] = category_page

                if self.redis_manager.save_completed_data(detail_url, scraped_data, category_url, category_page):
                    logger.info(f"详情页数据保存成功: {detail_url} (类目: {scraped_data['category_name']}, 页码: {category_page})")
                    self.redis_manager.increment_stats('success_detail_tasks', 1)
                    self.redis_manager.update_stats('last_activity_time', int(time.time()))
                    return True
                logger.error(f"详情页数据保存失败: {detail_url}")
                return False

            # 'failed' and any unexpected status previously duplicated the
            # exact same requeue/fail-queue logic; both now share one path.
            self._handle_scrape_failure(task, result)
            return False

        except Exception as e:
            logger.error(f"处理详情页任务失败: {e}")
            # Exceptions also consume one retry.
            task['retry_count'] = task.get('retry_count', 0) + 1
            task['last_failure_reason'] = f'exception: {str(e)}'
            task['last_failure_time'] = int(time.time())

            if task['retry_count'] >= DETAIL_SCRAPER_CONFIG['MAX_FAILURE_COUNT']:
                logger.error(f"详情页任务异常重试次数超限，放入失败队列: {task['detail_url']} (重试次数: {task['retry_count']})")
                self.redis_manager.add_detail_failed_task(task, f"异常失败 (第{task['retry_count']}次): {str(e)}")
                self.redis_manager.increment_stats('failed_detail_tasks', 1)
            else:
                self.redis_manager.add_detail_task_with_retry(task['detail_url'], task['category_url'], task.get('category_page'), task['retry_count'])
            return False

    def _save_reused_record(self, detail_url: str, category_url: str, category_page) -> Optional[bool]:
        """Persist a new category/page record from previously scraped data.

        Returns:
            True/False when the reuse attempt was handled (saved or save
            failed), or None when no stored data exists and the caller must
            scrape the page afresh.
        """
        logger.debug(f"URL已抓取过，直接保存新记录: {detail_url} (类目: {category_url}, 页码: {category_page})")
        existing_data = self.redis_manager.get_completed_data(detail_url)
        if not (existing_data and existing_data.get('data')):
            logger.warning(f"URL已抓取但无法获取数据，需要重新抓取: {detail_url}")
            return None

        # Copy so the stored original is never mutated.
        reused_data = existing_data['data'].copy()
        reused_data['category_name'] = self._extract_category_name(category_url)
        reused_data['category_url'] = category_url
        reused_data['category_page'] = category_page

        if self.redis_manager.save_completed_data(detail_url, reused_data, category_url, category_page):
            logger.info(f"成功保存URL的类目+页码记录: {detail_url} (类目: {reused_data['category_name']}, 页码: {category_page})")
            self.redis_manager.increment_stats('success_detail_tasks', 1)
            self.redis_manager.update_stats('last_activity_time', int(time.time()))
            return True

        logger.error(f"保存URL的类目+页码记录失败: {detail_url}")
        self.redis_manager.increment_stats('failed_detail_tasks', 1)
        self.redis_manager.update_stats('last_activity_time', int(time.time()))
        return False

    def _handle_scrape_failure(self, task: Dict, result: Dict) -> None:
        """Record one scrape failure on the task and route it.

        Within the retry budget the task goes back on the work queue with
        its incremented retry count; past the budget it moves to the failed
        queue and the failure stats are updated.
        """
        detail_url = task['detail_url']
        category_url = task['category_url']
        category_page = task.get('category_page')

        task['retry_count'] = task.get('retry_count', 0) + 1
        task['last_failure_reason'] = result.get('reason', 'unknown')
        task['last_failure_time'] = int(time.time())

        if task['retry_count'] >= DETAIL_SCRAPER_CONFIG['MAX_FAILURE_COUNT']:
            logger.error(f"详情页任务重试次数超限，放入失败队列: {detail_url} (重试次数: {task['retry_count']})")
            self.redis_manager.add_detail_failed_task(task, f"重试次数超限 (第{task['retry_count']}次): {result.get('reason', 'unknown')}")
            self.redis_manager.increment_stats('failed_detail_tasks', 1)
            self.redis_manager.update_stats('last_activity_time', int(time.time()))
        else:
            # Requeue, preserving the accumulated retry count.
            self.redis_manager.add_detail_task_with_retry(detail_url, category_url, category_page, task['retry_count'])
    
    def retry_failed_tasks(self):
        """Drain up to 10 tasks from the failed queue and reprocess them."""
        logger.info("开始重试失败的详情页任务")

        processed = 0
        limit = 10  # cap per invocation

        while processed < limit:
            failed_task = self.redis_manager.get_detail_failed_task()
            if not failed_task:
                logger.info("无详情页失败任务需要重试")
                break

            task = failed_task['task']
            processed += 1

            logger.info(f"重试详情页失败任务 ({processed}/{limit}): {task.get('detail_url', 'unknown')}")

            # Past the retry budget: skip (the task stays popped).
            if task.get('retry_count', 0) >= DETAIL_SCRAPER_CONFIG['MAX_FAILURE_COUNT']:
                logger.warning(f"详情页任务重试次数超限，跳过: {task.get('detail_url', 'unknown')}")
                continue

            if not self.process_detail_task(task):
                # NOTE(review): process_detail_task already requeues or
                # records the failure itself, so this extra bookkeeping may
                # double-count failures — confirm this is intended.
                task['retry_count'] = task.get('retry_count', 0) + 1
                self.redis_manager.add_detail_failed_task(task, f"重试失败 (第{task['retry_count']}次)")
                self.redis_manager.increment_stats('failed_detail_tasks', 1)
                self.redis_manager.update_stats('last_activity_time', int(time.time()))

        logger.info(f"详情页重试完成，处理了 {processed} 个失败任务")
    
    def run_continuous(self):
        """Run the detail scraper forever with a pool of worker threads.

        Health-checks the workers every 5 seconds and restarts any that
        exited, keeping the pool at MAX_WORKERS. Stops on KeyboardInterrupt,
        cancelling still-pending futures.
        """
        max_workers = DETAIL_SCRAPER_CONFIG['MAX_WORKERS']
        logger.info(f"🚀 启动详情页爬虫 (持续异步模式，最大并发: {max_workers})")

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Launch the initial set of worker loops.
            futures = [executor.submit(self._worker_loop, i) for i in range(max_workers)]

            try:
                while True:
                    # Bug fix: drop finished futures (the old code let the
                    # list grow without bound across restarts).
                    futures = [f for f in futures if not f.done()]

                    # Bug fix: the restart count referenced a misspelled name
                    # ('active_futives'), raising NameError the first time a
                    # worker died. Restart enough workers to refill the pool.
                    alive = len(futures)
                    for i in range(max_workers - alive):
                        worker_id = alive + i
                        futures.append(executor.submit(self._worker_loop, worker_id))
                        logger.info(f"重启worker线程: {worker_id}")

                    time.sleep(5)  # poll worker health every 5 seconds

            except KeyboardInterrupt:
                logger.info("收到中断信号，停止详情页爬虫")
                # Cancel whatever has not started yet.
                for future in futures:
                    future.cancel()
    
    def _worker_loop(self, worker_id: int):
        """Endless loop for one worker: pull a task from Redis, process it,
        and periodically reset the proxy pool after repeated failures."""
        logger.info(f"Worker {worker_id} 启动")

        while True:
            try:
                task = self.redis_manager.get_detail_task()
                if not task:
                    # Idle wait so an empty queue doesn't spin the CPU.
                    time.sleep(1)
                    continue

                if self.process_detail_task(task):
                    logger.info(f"Worker {worker_id} 完成任务: {task.get('detail_url', 'unknown')}")
                else:
                    logger.warning(f"Worker {worker_id} 任务失败: {task.get('detail_url', 'unknown')}")

                # Too many consecutive errors while proxying — reset the pool.
                if self.proxy_manager.is_proxy_enabled() and self.consecutive_errors > 5:
                    logger.info(f"Worker {worker_id} 重置失败代理列表")
                    self.proxy_manager.reset_failed_proxies()
                    self.consecutive_errors = 0

                # Brief pause between tasks to cap the request rate.
                time.sleep(random.uniform(0.1, 0.3))

            except Exception as e:
                logger.error(f"Worker {worker_id} 运行错误: {e}")
                time.sleep(1)  # brief pause before resuming after an error