"""
网页搜索工具
用于搜索指定URL并提取内容
"""
import os
import logging
import threading
import queue
import concurrent.futures
import functools
import random
import time
from typing import Dict, Any, Optional, List, Callable
import json
from pathlib import Path
import hashlib
from urllib.parse import urlparse

from tools.browser_tools import BrowserTool
from tools.content_extractor import ContentExtractor

# Module-level logger
logger = logging.getLogger(__name__)

# Browser instance pool for high-concurrency scenarios
class BrowserPool:
    """Pool of reusable BrowserTool instances for high-concurrency scraping.

    Reusing browser instances avoids repeatedly paying browser start-up
    cost. Idle instances older than ``ttl`` seconds are discarded when
    they are next requested.
    """

    def __init__(self, max_size: int = 5, ttl: int = 300, headless: bool = True):
        """
        Initialize the browser pool.

        Args:
            max_size: Maximum number of idle instances kept in the pool.
            ttl: Time-to-live of an idle browser instance, in seconds.
            headless: Whether browsers are started in headless mode.
        """
        self.pool = queue.Queue()
        self.max_size = max_size
        self.ttl = ttl
        self.headless = headless
        # RLock: re-entrant in case a lock holder calls back into the pool.
        self.lock = threading.RLock()
        # NOTE(review): last_used is never read or written elsewhere in this
        # file; kept only so external code touching the attribute still works.
        self.last_used = {}
        self.active_count = 0

        logger.info(f"初始化浏览器实例池，最大大小: {max_size}")

    def _close_quietly(self, browser: BrowserTool) -> None:
        """Close a browser instance, logging instead of raising on failure."""
        try:
            browser.close()
        except Exception:
            # A failing close must never take down pool management
            # (previously it could abort get_browser/close_all mid-way).
            logger.debug("关闭浏览器实例时出错", exc_info=True)

    def get_browser(self) -> BrowserTool:
        """
        Get a browser instance, reusing a pooled one when possible.

        Returns:
            A BrowserTool instance.
        """
        with self.lock:
            # Drain the pool until a non-expired instance turns up.
            try:
                while not self.pool.empty():
                    browser, timestamp = self.pool.get_nowait()
                    if time.time() - timestamp > self.ttl:
                        # Expired: dispose of it and keep looking.
                        self._close_quietly(browser)
                        logger.debug(f"关闭过期浏览器实例")
                        continue
                    self.active_count += 1
                    logger.debug(f"从池中获取浏览器实例，当前活跃实例: {self.active_count}")
                    return browser
            except queue.Empty:
                # Pool was drained between the empty() check and get_nowait().
                pass

            # Nothing reusable: create a fresh instance.
            browser = BrowserTool(headless=self.headless, stealth_mode=True)
            self.active_count += 1
            logger.debug(f"创建新浏览器实例，当前活跃实例: {self.active_count}")
            return browser

    def return_browser(self, browser: BrowserTool) -> None:
        """
        Return a browser instance to the pool.

        Args:
            browser: Instance previously obtained via get_browser().
        """
        with self.lock:
            self.active_count -= 1
            # Keep the instance only while the pool has room for it.
            if self.pool.qsize() < self.max_size:
                self.pool.put((browser, time.time()))
                logger.debug(f"浏览器实例归还到池中，当前池大小: {self.pool.qsize()}")
            else:
                self._close_quietly(browser)
                logger.debug(f"关闭多余浏览器实例，当前活跃实例: {self.active_count}")

    def close_all(self) -> None:
        """Close every idle browser instance left in the pool."""
        with self.lock:
            while not self.pool.empty():
                try:
                    browser, _ = self.pool.get_nowait()
                except queue.Empty:
                    break
                # Guarded close: one broken browser no longer stops the sweep.
                self._close_quietly(browser)
            logger.info(f"已关闭所有浏览器实例")

# Retry-on-failure decorator
def retry_with_backoff(max_retries: int = 3, initial_delay: float = 1.0,
                       max_delay: float = 60.0, backoff_factor: float = 2.0):
    """
    Decorator retrying a function with exponential backoff and jitter.

    Args:
        max_retries: Maximum number of retries before re-raising.
        initial_delay: Delay before the first retry, in seconds.
        max_delay: Upper bound on any single delay, in seconds.
        backoff_factor: Multiplier applied to the delay after each retry.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            retries = 0
            delay = initial_delay

            while True:
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    retries += 1
                    if retries > max_retries:
                        logger.error(f"函数{func.__name__}执行失败，已达到最大重试次数")
                        raise

                    # Jitter (±20%) spreads retries from concurrent workers.
                    jitter = random.uniform(0.8, 1.2)
                    sleep_time = delay * jitter

                    logger.warning(f"函数{func.__name__}执行失败，{retries}/{max_retries}次重试，延迟{sleep_time:.2f}秒: {str(e)}")
                    time.sleep(sleep_time)
                    # Bug fix: grow the delay AFTER sleeping. Previously it was
                    # multiplied before the first sleep, so initial_delay was
                    # never actually used as the first retry delay.
                    delay = min(delay * backoff_factor, max_delay)
        return wrapper
    return decorator

class WebSearchTool:
    """Web search tool that fetches pages and extracts their content."""

    def __init__(self, headless: bool = True, output_dir: str = "./results",
                 output_format: str = "markdown", extract_mode: str = "main"):
        """
        Initialize the web search tool.

        Args:
            headless: Whether to run the browser in headless mode.
            output_dir: Directory where search results are written.
            output_format: Output format, "markdown" or "text".
            extract_mode: Extraction mode, "main" (main content area)
                or "body" (the whole body element).
        """
        self.process_id = os.getpid()
        self.thread_id = threading.get_ident()
        # Lifecycle events are logged at INFO.
        logger.info(f"[PID:{self.process_id}][TID:{self.thread_id}] 初始化WebSearchTool")

        self.browser_pool = BrowserPool(max_size=5, headless=headless)
        self.extractor = ContentExtractor(output_format=output_format, extract_mode=extract_mode)
        self.output_dir = Path(output_dir)
        self.output_format = output_format
        self.extract_mode = extract_mode

        # Ensure the output directory exists before anything is saved.
        self.output_dir.mkdir(parents=True, exist_ok=True)
        # Detail-level events are logged at DEBUG.
        logger.debug(f"输出目录: {self.output_dir}")

    @retry_with_backoff(max_retries=3)
    def search(self, url: str) -> Dict[str, Any]:
        """
        Fetch the given URL and return the extracted content.

        Results are cached on disk (see get_cache_filename); a readable
        cache file short-circuits the network fetch entirely.

        Args:
            url: The URL of the page to fetch.

        Returns:
            Dict containing the extracted content and metadata.
        """
        # 1. Check the on-disk cache first.
        cache_file = self.get_cache_filename(url)
        if os.path.exists(cache_file):
            try:
                with open(cache_file, 'r', encoding='utf-8') as f:
                    cached = json.load(f)
                logger.info(f"命中缓存，直接返回: {cache_file}")
                return cached
            except (json.JSONDecodeError, OSError):
                # Bug fix: a corrupt/unreadable cache file used to raise here
                # and be retried pointlessly by the decorator. Fall through
                # and fetch the page fresh instead.
                logger.warning(f"缓存文件读取失败，重新抓取: {cache_file}")

        # 2. Cache miss: fetch the page with a pooled browser instance.
        logger.info(f"开始搜索URL: {url}")

        browser = self.browser_pool.get_browser()

        try:
            # Navigate to the URL.
            browser.get(url)
            logger.debug(f"成功访问URL: {url}")

            # Let the page finish loading; also mimics a human reader.
            browser.simulate_human_reading(min_time=1.0, max_time=3.0)
            logger.debug(f"页面加载完成")

            # Grab the rendered page HTML.
            html_content = browser.page.html

            # Extract the content per the configured mode/format.
            result = self.extractor.extract_content(html_content, url)
            logger.info(f"成功提取内容，标题: {result['metadata']['title']}")

            # Persist the JSON result (this file doubles as the cache).
            file_path = self._save_result(result, url)

            # For markdown output, also write a standalone .md file.
            if self.output_format == "markdown":
                self._save_markdown_file(result, file_path)

            return result

        except Exception as e:
            logger.error(f"搜索过程中发生错误: {str(e)}", exc_info=True)
            raise
        finally:
            # Always hand the browser back so the pool does not leak.
            self.browser_pool.return_browser(browser)
            logger.debug(f"已归还浏览器实例到池中")

    def batch_search(self, urls: List[str], max_workers: int = 3,
                     callback: Optional[Callable[[str, Dict[str, Any]], None]] = None) -> List[Dict[str, Any]]:
        """
        Search several URLs concurrently.

        Args:
            urls: List of URLs to fetch.
            max_workers: Maximum number of concurrent worker threads.
            callback: Optional callable invoked as callback(url, result)
                after each URL completes successfully.

        Returns:
            List of result dicts for the URLs that succeeded. Failures
            are logged and dumped to an errors_<timestamp>.json file.
        """
        logger.info(f"开始批量搜索，URL数量: {len(urls)}，最大并发数: {max_workers}")
        results = []
        errors = []

        # Fan the URLs out over a thread pool (I/O-bound work).
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_url = {executor.submit(self.search, url): url for url in urls}

            # Collect results as they complete, not in submission order.
            for future in concurrent.futures.as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    result = future.result()
                    results.append(result)
                    logger.info(f"URL处理完成: {url}")

                    # A failing callback must not poison the whole batch.
                    if callback:
                        try:
                            callback(url, result)
                        except Exception as e:
                            logger.error(f"回调函数执行失败: {str(e)}")

                except Exception as e:
                    logger.error(f"处理URL失败: {url}, 错误: {str(e)}")
                    errors.append({"url": url, "error": str(e)})

        # Summarize the batch outcome.
        success_count = len(results)
        error_count = len(errors)
        total_count = len(urls)

        logger.info(f"批量搜索完成: 总计{total_count}个URL，成功{success_count}个，失败{error_count}个")

        # Persist any failures for later inspection.
        if errors:
            error_file = self.output_dir / f"errors_{int(time.time())}.json"
            with open(error_file, 'w', encoding='utf-8') as f:
                json.dump(errors, f, ensure_ascii=False, indent=2)
            logger.warning(f"错误信息已保存到: {error_file}")

        return results

    def _save_result(self, result: Dict[str, Any], url: str) -> str:
        """
        Save a search result to its cache file.

        Args:
            result: Search result dict.
            url: The originally requested URL (determines the file name).

        Returns:
            Path of the saved file as a string.
        """
        cache_file = self.get_cache_filename(url)
        with open(cache_file, 'w', encoding='utf-8') as f:
            json.dump(result, f, ensure_ascii=False, indent=2)
        logger.info(f"搜索结果已保存到: {cache_file}")
        return str(cache_file)

    def _save_markdown_file(self, result: Dict[str, Any], json_path: str) -> str:
        """
        Write the extracted content to a standalone Markdown file.

        Args:
            result: Search result dict.
            json_path: Path of the JSON result file; the Markdown file is
                written alongside it with a .md suffix.

        Returns:
            Path of the saved Markdown file.
        """
        # Bug fix: with_suffix swaps only the final extension. The previous
        # str.replace('.json', '.md') rewrote EVERY ".json" occurrence in
        # the path (e.g. a domain containing ".json").
        md_path = str(Path(json_path).with_suffix('.md'))

        # Build the Markdown document header from the metadata.
        md_content = f"# {result['metadata']['title']}\n\n"

        if result['metadata']['description']:
            md_content += f"_{result['metadata']['description']}_\n\n"

        md_content += f"来源: [{result['metadata']['domain']}]({result['url']})\n\n"

        if result['metadata']['author']:
            md_content += f"作者: {result['metadata']['author']}\n\n"

        if result['metadata']['publish_date']:
            md_content += f"发布日期: {result['metadata']['publish_date']}\n\n"

        # Record which extraction mode produced this document.
        md_content += f"提取模式: {self.extract_mode}\n\n"

        md_content += "---\n\n"
        md_content += result['content']

        with open(md_path, 'w', encoding='utf-8') as f:
            f.write(md_content)

        logger.info(f"Markdown内容已保存到: {md_path}")
        return md_path

    def __del__(self):
        """Best-effort release of pooled browsers at garbage collection."""
        try:
            self.browser_pool.close_all()
        except Exception:
            # __del__ must never raise; the interpreter may be shutting
            # down and attributes may already be gone. (Was a bare except,
            # which also swallowed SystemExit/KeyboardInterrupt.)
            pass

    def get_cache_filename(self, url: str) -> str:
        """
        Build the cache-file path for a URL.

        The name combines the URL's domain (readability) with an MD5 hash
        of the full URL (uniqueness), plus a "_body" suffix when body
        extraction mode is active so the two modes never share a cache entry.
        """
        # Domain part; ':' (port separator) is not filename-safe everywhere.
        domain = urlparse(url).netloc.replace(':', '_')
        # Hash of the full URL guarantees uniqueness per page.
        url_hash = hashlib.md5(url.encode('utf-8')).hexdigest()
        # Keep "body"-mode results separate from "main"-mode results.
        extract_mode_suffix = "_body" if self.extract_mode == "body" else ""
        filename = f"{domain}_{url_hash}{extract_mode_suffix}.json"
        return str(self.output_dir / filename)