# -*- coding: utf-8 -*-
"""
列表页数据获取模块
负责获取列表页数据并创建详情页任务
使用requests而不是session
"""

import requests
import time
import random
import logging
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List, Dict, Optional
from urllib.parse import urljoin
from modules.common.redis_manager import RedisManager
from modules.common.proxy_manager import ProxyManager, RequestHelper
from modules.common.data_parser import HouzzDataParser
from modules.common.logger_manager import get_logger
from modules.common.user_agent_manager import UserAgentManager
from config.houzz_settings import LIST_PAGE_CONFIG, LIST_SCRAPER_CONFIG

logger = get_logger('list', spider_name='houzz')


class ListPageScraper:
    """List-page scraper.

    Pulls list-page tasks from Redis, fetches the corresponding Houzz
    category pages, extracts detail-page URLs and enqueues one detail
    task per URL.  Intended to run as a pool of worker threads
    (see :meth:`run_continuous`).
    """

    def __init__(self, redis_manager: RedisManager, proxy_manager: ProxyManager):
        """Initialize the list-page scraper.

        Args:
            redis_manager: queue / statistics backend used for all task bookkeeping.
            proxy_manager: supplies (optional) outbound proxies for requests.
        """
        self.redis_manager = redis_manager
        self.proxy_manager = proxy_manager
        self.request_helper = RequestHelper(proxy_manager)
        self.user_agent_manager = UserAgentManager()
        # Consecutive-failure counter shared by all worker threads.
        # NOTE(review): it is read/written without a lock; races only blur a
        # heuristic threshold (cool-down / proxy reset), which looks tolerable,
        # but confirm before raising MAX_WORKERS significantly.
        self.consecutive_errors = 0

    def _initialize_csrf_token(self) -> Optional[str]:
        """Fetch the Houzz home page and extract the CSRF token.

        Makes at most two attempts (one retry after a 3 s pause on
        exception).  Returns the token string, or ``None`` when the home
        page could not be fetched or the token pattern is absent.
        """
        import re

        token_pattern = re.compile(r'"csrfToken":"([^"]+)"')
        for i in range(2):  # one retry
            try:
                response = self.request_helper.make_request("https://www.houzz.com", timeout=45)
                if not response:
                    # Helper already exhausted its own retries; give up immediately.
                    return None

                match = token_pattern.search(response.text)
                if match:
                    return match.group(1)
            except Exception as e:
                logger.error(f"初始化CSRF令牌失败 (尝试 {i + 1}/2): {e}")
                if i == 0:
                    time.sleep(3)

        logger.error("❌ 无法获取CSRF令牌")
        return None

    def _fetch_url(self, url: str, params: Optional[Dict] = None, csrf_token: Optional[str] = None, retry_count: int = 0) -> Optional[requests.Response]:
        """GET *url* (with *params* folded into the query string) with retries.

        Args:
            url: base URL to fetch.
            params: optional query parameters, urlencoded into the URL.
            csrf_token: optional CSRF token sent as ``X-CSRF-Token``.
            retry_count: number of retries after the first attempt.

        Returns:
            The successful (or 404) :class:`requests.Response`, or ``None``
            when every attempt failed.

        Raises:
            Exception: when ``consecutive_errors`` reaches
                ``MAX_CONSECUTIVE_ERRORS``.
        """
        import urllib.parse

        # Build the full URL, appending params to any existing query string.
        if params:
            param_string = urllib.parse.urlencode(params)
            separator = '&' if '?' in url else '?'
            full_url = f"{url}{separator}{param_string}"
        else:
            full_url = url

        safe_url = urllib.parse.quote(full_url, safe='/:?=&%')

        # NOTE(review): these cookies are captured session values (visitor id,
        # AB-test buckets, ...) that will eventually expire — confirm they need
        # to be refreshed periodically or sourced from configuration.
        cookies = {
            'v': '1760611674_0a85a737-5c26-4f67-ad2d-fe17efb76643_9c6bbfc39c768aeb27d1eb7664ccadaa',
            'hzv': '1760611674_0a85a737-5c26-4f67-ad2d-fe17efb76643_9c6bbfc39c768aeb27d1eb7664ccadaa',
            'vct': 'en-US-SBxazfBo4R1azfBo4h1azfBoCR9azfBo8B9azfBo',
            'FPAU': '1.1.1274685418.1760611678',
            'jdv': '',
            'prf': 'prodirDistFil%7C%7D',
            'cto_bundle': 'gkHC-V9uNCUyQmFiNGRqcGtKZmNQNnNnOEdzamVtJTJGYXR2d3l2V0d1NkFLWWRUcmlTcHF4JTJCJTJGJTJCeU5wZGFtR2dMbTJnOHR4empVZHIzaHdOOGdnMDFQQ0NDTW1VbHhUVml0MFU2QTJCT3IlMkZLQXk1NCUyQm5Eb1BaOWgzd0d0UGJ4OTV5cmhlJTJGMzlxbVRWTGs2NnVxaHVFNTBLOHdnTzZXVTJ5U1ZEaXQ2bTZNZ1o4b3pmRm5adktvMng2eExyaGVMQmZMZGhIcllk',
            'documentHeight': '742',
            'fstest': '8',

            'ServerAwinChannelCookie': 'other',
            'awin_source': 'pro_solutions_upgrade_header',
            'ABTastySession': 'mrasn=&lp=https%253A%252F%252Fwww.houzz.com%252Ffor-pros%252Fsoftware-construction',
            'ABTasty': 'uid=qxd08gp0m3w6h230&fst=1760625248754&pst=1761236768146&cst=1761479715145&ns=3&pvt=16&pvis=10&th=',
            'crto_mapped_user_id': 'u9qREl8wRnlvNlY1SmlJWG52eVFqJTJCZHIlMkYyQ28lMkJuMXVzdmEyNmhuWVlFT1FoM2U0JTNE',
            'hzd': 'fd165fb3-aa6c-4bf7-ba79-5a77c5d7dd3b%3A%3A%3Asigned_in_hp_ideas%3A%3ABath',
        }

        # Browser-like headers with a randomized User-Agent per call.
        headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'zh-CN,zh;q=0.9',
            'referer': 'https://www.houzz.com/',
            "User-Agent": self.user_agent_manager.get_random_user_agent(),
            "x-requested-with": "XMLHttpRequest"
        }
        if csrf_token:
            headers['X-CSRF-Token'] = csrf_token

        for attempt in range(retry_count + 1):
            try:
                # Pick a proxy only when proxying is enabled.
                proxy = self.proxy_manager.get_working_proxy() if self.proxy_manager.is_proxy_enabled() else None
                if not proxy:
                    logger.error("列表页，未使用代理情况！")

                response = requests.get(
                    safe_url,
                    headers=headers,
                    proxies=proxy,
                    timeout=LIST_SCRAPER_CONFIG['REQUEST_TIMEOUT'],
                    cookies=cookies,
                )

                response.raise_for_status()
                self.consecutive_errors = 0  # reset error streak on success
                return response

            except requests.exceptions.HTTPError as e:
                # e.response can be None when HTTPError is raised manually.
                status = e.response.status_code if e.response is not None else None

                if status == 429:
                    # Rate limited: cool down, then retry without backoff bookkeeping.
                    cool_down_time = random.uniform(30, 60)
                    logger.warning(f"429 请求过多，等待 {cool_down_time:.2f}秒")
                    time.sleep(cool_down_time)
                    continue
                elif status == 404:
                    # 404 pages may still contain detail URLs; hand the response
                    # back and let the parser decide.
                    logger.warning(f"收到404状态码: {full_url}")
                    return e.response

                logger.warning(f"HTTP错误 {full_url} (尝试 {attempt + 1}): {e}")

            except requests.exceptions.RequestException as e:
                logger.warning(f"请求失败 {full_url} (尝试 {attempt + 1}): {e}")

            # BUGFIX: the old condition compared against RETRY_COUNT + 2, which
            # was always true inside range(retry_count + 1), so the
            # consecutive-error accounting below could never run.
            if attempt < retry_count:
                time.sleep(2 ** attempt)  # exponential backoff
            else:
                self.consecutive_errors += 1
                logger.error(f"URL获取失败，连续错误次数: {self.consecutive_errors}")

                if self.consecutive_errors >= LIST_SCRAPER_CONFIG['MAX_CONSECUTIVE_ERRORS']:
                    raise Exception("达到最大连续错误次数限制")

        return None

    def scrape_list_page(self, category_url: str, page: int, csrf_token: Optional[str] = None) -> Dict:
        """Scrape one list page and extract detail-page URLs.

        Returns a dict with ``status`` of ``'success'`` (plus ``urls``) or
        ``'failed'`` (plus ``reason``).  Note: this method never returns
        ``'no_data'``; a page without URLs is reported as ``'failed'``.
        """
        try:
            # Random jitter between requests to control QPS.
            delay = random.uniform(LIST_SCRAPER_CONFIG['MIN_DELAY'], LIST_SCRAPER_CONFIG['MAX_DELAY'])
            time.sleep(delay)

            # 'fi' is the zero-based item offset of the requested page.
            list_params = {"fi": str((page - 1) * LIST_PAGE_CONFIG['ITEMS_PER_PAGE'])}

            response = self._fetch_url(category_url, list_params, csrf_token, retry_count=LIST_SCRAPER_CONFIG['RETRY_COUNT'])

            if not response:
                logger.error(f"获取第{page}页失败")
                return {'status': 'failed', 'reason': 'request_failed'}

            # Parse the list page (including 404 responses, which may still
            # carry detail URLs).
            parser = HouzzDataParser(response.text, category_url)
            profile_urls = parser.parse_list_page()

            if profile_urls:
                logger.info(f"从第{page}页提取到 {len(profile_urls)} 个详情页URL")
                return {'status': 'success', 'urls': profile_urls}
            else:
                # No detail URLs (including empty 404 pages) counts as failure.
                logger.warning(f"第{page}页无详情页URL，视为失败")
                return {'status': 'failed', 'reason': 'no_urls_found'}

        except Exception as e:
            logger.error(f"爬取列表页失败 {category_url} 第{page}页: {e}")
            return {'status': 'failed', 'reason': f'exception: {str(e)}'}

    def _requeue_or_fail(self, task: Dict, category_url: str, page: int, reason: str) -> None:
        """Record a failure on *task*, then either requeue it or move it to the failed queue.

        Assumes the caller has already incremented ``task['retry_count']``.
        """
        task['last_failure_reason'] = reason
        task['last_failure_time'] = int(time.time())

        if task['retry_count'] >= LIST_SCRAPER_CONFIG['MAX_FAILURE_COUNT']:
            # Retry budget exhausted: park the task in the failed queue.
            logger.error(f"列表页任务重试次数超限，放入失败队列: {category_url} 第{page}页 (重试次数: {task['retry_count']})")
            self.redis_manager.add_list_failed_task(task, f"重试次数超限 (第{task['retry_count']}次): {reason}")
        else:
            # Requeue with the updated retry count; no immediate retry here.
            logger.warning(f"列表页任务失败，重新放回队列: {category_url} 第{page}页 (第{task['retry_count']}次重试)")
            self.redis_manager.add_list_task_with_retry(category_url, page, task['retry_count'])

    def process_list_task(self, task: Dict) -> bool:
        """Process a single list-page task end to end.

        Fetches a CSRF token, scrapes the page, and on success enqueues one
        detail task per extracted URL.  Returns True on success, False when
        the task was requeued or moved to the failed queue.
        """
        try:
            category_url = task['category_url']
            page = task['page']
            retry_count = task.get('retry_count', 0)

            logger.info(f"处理列表页任务: {category_url} 第{page}页 (重试次数: {retry_count})")

            # A fresh CSRF token per task; may be None, scraping proceeds anyway.
            csrf_token = self._initialize_csrf_token()

            result = self.scrape_list_page(category_url, page, csrf_token)

            if result['status'] == 'failed':
                # Bump the retry count and let the shared handler requeue/park it.
                task['retry_count'] = retry_count + 1
                self._requeue_or_fail(task, category_url, page, result.get('reason', 'unknown'))
                return False

            elif result['status'] == 'no_data':
                # Defensive branch: scrape_list_page currently never returns
                # 'no_data' (empty pages come back as 'failed'), but an empty
                # page would still count as a successfully processed page.
                logger.info(f"第{page}页无详情页URL，任务完成")
                self.redis_manager.increment_stats('success_pages', 1)
                self.redis_manager.update_stats('last_activity_time', int(time.time()))
                return True

            elif result['status'] == 'success':
                # Fan out: one detail task per extracted profile URL.
                profile_urls = result.get('urls', [])
                now = int(time.time())
                detail_tasks = [
                    {
                        'detail_url': profile_url,
                        'category_url': category_url,
                        'category_page': page,
                        'status': 'pending',
                        'created_at': now,
                        'retry_count': 0
                    }
                    for profile_url in profile_urls
                ]

                if detail_tasks:
                    self.redis_manager.batch_add_detail_tasks(detail_tasks)
                    # Update stats: total detail tasks and successful pages.
                    self.redis_manager.increment_stats('total_detail_tasks', len(detail_tasks))
                    self.redis_manager.increment_stats('success_pages', 1)
                    self.redis_manager.update_stats('last_activity_time', int(time.time()))
                    logger.info(f"创建了 {len(detail_tasks)} 个详情页任务")

                return True
            else:
                # Unknown status: treat exactly like a failure.
                task['retry_count'] = retry_count + 1
                self._requeue_or_fail(task, category_url, page, result.get('reason', 'unknown'))
                return False

        except Exception as e:
            logger.error(f"处理列表页任务失败: {e}")
            # Exceptions also consume a retry.
            task['retry_count'] = task.get('retry_count', 0) + 1
            task['last_failure_reason'] = f'exception: {str(e)}'
            task['last_failure_time'] = int(time.time())

            if task['retry_count'] >= LIST_SCRAPER_CONFIG['MAX_FAILURE_COUNT']:
                logger.error(f"列表页任务异常重试次数超限，放入失败队列: {task['category_url']} 第{task['page']}页 (重试次数: {task['retry_count']})")
                self.redis_manager.add_list_failed_task(task, f"异常失败 (第{task['retry_count']}次): {str(e)}")
            else:
                logger.warning(f"列表页任务异常，重新放回队列: {task['category_url']} 第{task['page']}页 (第{task['retry_count']}次重试)")
                self.redis_manager.add_list_task_with_retry(task['category_url'], task['page'], task['retry_count'])
            return False

    def retry_failed_tasks(self):
        """Drain up to 10 tasks from the failed queue and reprocess them."""
        logger.info("开始重试失败的列表页任务")

        retry_count = 0
        max_retries = 10  # at most 10 failed tasks per invocation

        while retry_count < max_retries:
            failed_task = self.redis_manager.get_list_failed_task()

            if not failed_task:
                logger.info("无列表页失败任务需要重试")
                break

            task = failed_task['task']
            retry_count += 1

            logger.info(f"重试列表页失败任务 ({retry_count}/{max_retries}): {task.get('category_url', 'unknown')} 第{task.get('page', 'unknown')}页")

            # Over-limit tasks are dropped here (already popped, not re-added).
            # NOTE(review): confirm permanent discard is the intended policy.
            if task.get('retry_count', 0) >= LIST_SCRAPER_CONFIG['MAX_FAILURE_COUNT']:
                logger.warning(f"列表页任务重试次数超限，跳过: {task.get('category_url', 'unknown')}")
                continue

            success = self.process_list_task(task)

            if not success:
                # Failed again: bump the count and push back to the failed queue.
                task['retry_count'] = task.get('retry_count', 0) + 1
                self.redis_manager.add_list_failed_task(task, f"重试失败 (第{task['retry_count']}次)")
                # Stats: a failed retry still counts as a failed page.
                self.redis_manager.increment_stats('failed_pages', 1)
                self.redis_manager.update_stats('last_activity_time', int(time.time()))

        logger.info(f"列表页重试完成，处理了 {retry_count} 个失败任务")

    def run_continuous(self):
        """Run the scraper forever with a pool of worker threads.

        Spawns MAX_WORKERS worker loops and supervises them, restarting any
        that exit.  Blocks until KeyboardInterrupt.
        """
        logger.info(f"🚀 启动列表页爬虫 (持续异步模式，最大并发: {LIST_SCRAPER_CONFIG['MAX_WORKERS']})")

        max_workers = LIST_SCRAPER_CONFIG['MAX_WORKERS']
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Start the initial set of worker loops.
            futures = [executor.submit(self._worker_loop, i) for i in range(max_workers)]

            try:
                while True:
                    # BUGFIX: prune completed futures so the list does not grow
                    # without bound across restarts (the old code only appended).
                    futures = [f for f in futures if not f.done()]

                    # Restart any workers that exited.
                    for i in range(max_workers - len(futures)):
                        worker_id = len(futures) + i
                        futures.append(executor.submit(self._worker_loop, worker_id))
                        logger.info(f"重启worker线程: {worker_id}")

                    time.sleep(5)  # poll thread health every 5 seconds

            except KeyboardInterrupt:
                logger.info("收到中断信号，停止列表页爬虫")
                # Cancel anything not yet running (running loops finish their
                # current iteration).
                for future in futures:
                    future.cancel()

    def _worker_loop(self, worker_id: int):
        """Work loop for a single thread: pull tasks from Redis and process them forever."""
        logger.info(f"Worker {worker_id} 启动")

        while True:
            try:
                task = self.redis_manager.get_list_task()

                if not task:
                    # No work available: brief sleep to avoid busy-spinning.
                    time.sleep(1)
                    continue

                success = self.process_list_task(task)

                if success:
                    logger.info(f"Worker {worker_id} 完成任务: {task.get('category_url', 'unknown')} 第{task.get('page', 'unknown')}页")
                else:
                    logger.warning(f"Worker {worker_id} 任务失败: {task.get('category_url', 'unknown')} 第{task.get('page', 'unknown')}页")

                # After an error streak, reset the failed-proxy list.
                if self.proxy_manager.is_proxy_enabled() and self.consecutive_errors > 5:
                    logger.info(f"Worker {worker_id} 重置失败代理列表")
                    self.proxy_manager.reset_failed_proxies()
                    self.consecutive_errors = 0

                # Short pause between tasks to control QPS.
                time.sleep(random.uniform(0.1, 0.5))

            except Exception as e:
                logger.error(f"Worker {worker_id} 运行错误: {e}")
                time.sleep(1)  # back off briefly on error

    def create_initial_tasks(self, categories: List[Dict]):
        """Create the initial list-page tasks for every category in one fast batch.

        Each category dict needs a ``url`` and may override ``start_page`` /
        ``end_page`` (defaults come from LIST_PAGE_CONFIG).
        """
        logger.info("创建初始列表页任务")

        all_tasks = []

        for category in categories:
            category_url = category['url']
            start_page = category.get('start_page', LIST_PAGE_CONFIG['START_PAGE'])
            end_page = category.get('end_page', LIST_PAGE_CONFIG['MAX_PAGES_PER_CATEGORY'])

            logger.info(f"为类目 {category.get('name', 'unknown')} 准备任务: 第{start_page}-{end_page}页")

            category_tasks = [
                {
                    'category_url': category_url,
                    'page': page,
                    'status': 'pending',
                    'created_at': int(time.time()),
                    'retry_count': 0,
                    'task_key': f"{category_url}:{page}"
                }
                for page in range(start_page, end_page + 1)
            ]
            all_tasks.extend(category_tasks)

            logger.info(f"类目 {category.get('name', 'unknown')} 准备了 {len(category_tasks)} 个任务")

        if all_tasks:
            logger.info(f"开始批量创建 {len(all_tasks)} 个列表页任务...")
            total_created = self.redis_manager.batch_add_list_tasks_fast(all_tasks)
            logger.info(f"✅ 快速批量创建完成: 成功创建 {total_created} 个任务")
        else:
            logger.warning("没有任务需要创建")