import os
import time
import logging
from typing import Dict, List, Optional
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests
from bs4 import BeautifulSoup
import re


class EnhancedWebBrowserTool:
    """增强版网页浏览工具 - 支持智能搜索和内容汇总"""
    
    def __init__(self, base_browser_tool, llm_client, max_workers=3):
        """Set up the enhanced browser tool.

        Args:
            base_browser_tool: underlying WebBrowserTool instance used for
                searching and primary page fetching.
            llm_client: LLM client used for analysis and summarization.
            max_workers: maximum number of concurrent fetch threads.
        """
        self.browser_tool = base_browser_tool
        self.llm = llm_client
        self.max_workers = max_workers
        self.logger = logging.getLogger(__name__)

        # Per-session stop flags enabling cooperative cancellation.
        self._stop_flags = {}

        # Browser-like request headers, used when falling back to direct
        # HTTP fetching (mimics a real Chrome/Windows browser).
        self.headers = dict([
            ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'),
            ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
            ('Accept-Language', 'zh-CN,zh;q=0.9,en;q=0.8'),
            ('Accept-Encoding', 'gzip, deflate'),
            ('Connection', 'keep-alive'),
            ('Upgrade-Insecure-Requests', '1'),
        ])

    async def intelligent_search(self, query: str, max_results: int = 5, deep_analysis: bool = True, session_id: str = None) -> Dict:
        """
        智能搜索功能 - 搜索 + 内容抓取 + LLM分析
        注意：这是异步版本
        """
        # 调用流式版本并返回最终结果
        final_result = None
        async for result in self.intelligent_search_stream(query, max_results, deep_analysis, session_id):
            final_result = result
        return final_result or {"error": "搜索失败", "query": query}

    async def intelligent_search_stream(self, query: str, max_results: int = 5, deep_analysis: bool = True, session_id: Optional[str] = None):
        """Intelligent search as an async generator yielding staged progress.

        Pipeline: web search -> sequential page-content fetch -> optional
        LLM analysis. The per-session stop flag is polled between every
        stage (and between every fetched page) so a caller can cancel
        mid-stream; a "stopped" dict is then yielded and the stream ends.

        Args:
            query: search query string.
            max_results: maximum number of search hits to process.
            deep_analysis: run LLM analysis over fetched content when True.
            session_id: session identifier used for cooperative cancellation.

        Yields:
            Dicts carrying at least "query", "stage" and "progress"; later
            stages add search results, partial fetch results, and finally
            the summary/key_points/analysis/recommendations fields.
        """
        try:
            # Bail out immediately if a stop was requested before we started.
            if session_id and self._stop_flags.get(session_id, False):
                yield {
                    "query": query,
                    "error": "搜索已被用户停止",
                    "stopped": True,
                    "stage": "初始化",
                    "progress": 0
                }
                return

            self.logger.info(f"开始智能搜索: {query}")

            # Stage 1: announce that the search is starting (10%).
            yield {
                "query": query,
                "stage": "正在搜索...",
                "progress": 10,
                "status": "搜索中"
            }

            # 1. Run the underlying web search.
            search_results = self.browser_tool.search_web(query, max_results)

            # Re-check the stop flag after the (blocking) search call.
            if session_id and self._stop_flags.get(session_id, False):
                yield {
                    "query": query,
                    "error": "搜索已被用户停止",
                    "stopped": True,
                    "stage": "搜索阶段",
                    "progress": 20
                }
                return

            if "error" in search_results:
                # Search backend failed: emit a terminal error payload.
                yield {
                    "query": query,
                    "error": search_results["error"],
                    "stage": "搜索失败",
                    "progress": 100,
                    "summary": "搜索失败，无法获取结果",
                    "analysis": "由于搜索服务不可用，无法进行内容分析"
                }
                return

            # Stage 2: search done, start fetching page content (30%).
            yield {
                "query": query,
                "stage": "正在抓取网页内容...",
                "progress": 30,
                "status": "内容抓取中",
                "search_results": search_results.get("results", [])
            }

            # 2. Fetch page content for each hit, one at a time, so we can
            # yield per-page progress and honor stop requests promptly.
            # NOTE(review): the concurrent helper `_fetch_page_contents`
            # exists but is not used here — presumably to allow streaming
            # progress updates.
            enriched_results = []
            search_list = search_results.get("results", [])

            for i, result in enumerate(search_list):
                # Stop check before each fetch.
                if session_id and self._stop_flags.get(session_id, False):
                    yield {
                        "query": query,
                        "error": "搜索已被用户停止",
                        "stopped": True,
                        "stage": "内容抓取阶段",
                        # Fetch phase spans progress 30-60.
                        "progress": int(30 + (i / len(search_list)) * 30)
                    }
                    return

                # Fetch a single page's content (mutates/enriches the dict).
                enriched_result = self._fetch_single_page_content(result, session_id)
                enriched_results.append(enriched_result)

                # Per-page progress update.
                progress = int(30 + ((i + 1) / len(search_list)) * 30)
                yield {
                    "query": query,
                    "stage": f"已抓取 {i+1}/{len(search_list)} 个页面",
                    "progress": progress,
                    "status": "内容抓取中",
                    "partial_results": enriched_results
                }

            # Stop check after the fetch phase completes (60%).
            if session_id and self._stop_flags.get(session_id, False):
                yield {
                    "query": query,
                    "error": "搜索已被用户停止",
                    "stopped": True,
                    "stage": "内容抓取完成",
                    "progress": 60
                }
                return

            if not deep_analysis or not await self.llm.is_available():
                # Stage 3: no LLM analysis requested/possible — return the
                # enriched results as the final payload.
                yield {
                    "query": query,
                    "stage": "搜索完成",
                    "progress": 100,
                    "status": "完成",
                    "results": enriched_results,
                    "detailed_results": enriched_results,
                    "search_engine": search_results.get("search_engine", "未知"),
                    "result_count": len(enriched_results),
                    "summary": "已获取搜索结果和网页内容，但LLM不可用，无法进行深度分析"
                }
                return

            # Stage 4: LLM analysis starting (70%).
            yield {
                "query": query,
                "stage": "正在进行AI分析...",
                "progress": 70,
                "status": "AI分析中",
                "results": enriched_results
            }

            # 3. LLM analysis and summarization of the fetched content.
            analysis_result = await self._analyze_with_llm(query, enriched_results, session_id)

            # Stop check after analysis (90%).
            if session_id and self._stop_flags.get(session_id, False):
                yield {
                    "query": query,
                    "error": "搜索已被用户停止",
                    "stopped": True,
                    "stage": "AI分析阶段",
                    "progress": 90
                }
                return

            # Stage 5: final payload with results plus structured analysis.
            yield {
                "query": query,
                "stage": "智能搜索完成",
                "progress": 100,
                "status": "完成",
                "results": enriched_results,
                "detailed_results": enriched_results,
                "search_engine": search_results.get("search_engine", "未知"),
                "result_count": len(enriched_results),
                "summary": analysis_result.get("summary", ""),
                "key_points": analysis_result.get("key_points", []),
                "analysis": analysis_result.get("analysis", ""),
                "recommendations": analysis_result.get("recommendations", [])
            }

        except Exception as e:
            # Any unexpected failure becomes a terminal error payload.
            self.logger.error(f"智能搜索过程中出错: {e}")
            yield {
                 "query": query,
                 "error": f"智能搜索失败: {str(e)}",
                 "stage": "错误",
                 "progress": 100,
                 "summary": "搜索过程中发生错误",
                 "analysis": "无法完成搜索和分析"
             }

    def _fetch_page_contents(self, search_results: List[Dict], session_id: str = None) -> List[Dict]:
        """
        并发抓取多个网页的内容
        
        Args:
            search_results: 搜索结果列表
            session_id: 会话ID，用于停止控制
            
        Returns:
            包含内容的增强搜索结果
        """
        enriched_results = []
        
        # 使用线程池并发处理
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            # 提交所有任务
            future_to_result = {
                executor.submit(self._fetch_single_page_content, result, session_id): result 
                for result in search_results
            }
            
            # 收集结果
            for future in as_completed(future_to_result):
                # 检查停止标志
                if session_id and self._stop_flags.get(session_id, False):
                    # 取消剩余的任务
                    for f in future_to_result:
                        if not f.done():
                            f.cancel()
                    break
                    
                original_result = future_to_result[future]
                try:
                    enhanced_result = future.result(timeout=30)  # 30秒超时
                    enriched_results.append(enhanced_result)
                except Exception as e:
                    self.logger.warning(f"抓取页面内容失败 {original_result.get('url', '')}: {e}")
                    # 保留原始结果，标记为内容获取失败
                    original_result["content"] = ""
                    original_result["content_error"] = str(e)
                    enriched_results.append(original_result)
        
        # 按原始排序保持结果顺序
        enriched_results.sort(key=lambda x: x.get("rank", 999))
        return enriched_results

    def _fetch_single_page_content(self, result: Dict, session_id: str = None) -> Dict:
        """
        获取单个页面的内容
        
        Args:
            result: 搜索结果项
            session_id: 会话ID，用于停止控制
            
        Returns:
            包含页面内容的结果
        """
        url = result.get("url", "")
        if not url:
            return result
        
        # 检查停止标志
        if session_id and self._stop_flags.get(session_id, False):
            result["content"] = ""
            result["content_error"] = "用户停止了搜索"
            return result
        
        try:
            # 使用基础浏览器工具获取页面内容，减少超时时间以提高响应性
            page_data = self.browser_tool.get_page_content(url, timeout=8, session_id=session_id)
            
            if "error" in page_data:
                self.logger.warning(f"获取页面内容失败 {url}: {page_data['error']}")
                result["content"] = ""
                result["content_error"] = page_data["error"]
                return result
            
            # 如果基础工具成功获取内容
            if page_data.get("success") and page_data.get("content"):
                result["content"] = page_data["content"]
                result["page_title"] = page_data.get("title", result.get("title", ""))
                return result
            
            # 如果基础工具失败，尝试使用requests作为备选方案
            self.logger.info(f"基础浏览器工具失败，尝试使用requests获取 {url}")
            
            # 在发起请求前再次检查停止标志
            if session_id and self._stop_flags.get(session_id, False):
                result["content"] = ""
                result["content_error"] = "用户停止了搜索"
                return result
            
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
            }
            
            response = requests.get(url, headers=headers, timeout=8)  # 减少超时时间
            response.raise_for_status()
            
            # 在处理响应前检查停止标志
            if session_id and self._stop_flags.get(session_id, False):
                result["content"] = ""
                result["content_error"] = "用户停止了搜索"
                return result
            
            soup = BeautifulSoup(response.content, 'html.parser')
            
            # 移除脚本和样式标签
            for script in soup(["script", "style"]):
                script.decompose()
                # 在循环中检查停止标志
                if session_id and self._stop_flags.get(session_id, False):
                    result["content"] = ""
                    result["content_error"] = "用户停止了搜索"
                    return result
            
            # 在处理内容前检查停止标志
            if session_id and self._stop_flags.get(session_id, False):
                result["content"] = ""
                result["content_error"] = "用户停止了搜索"
                return result
            
            # 尝试提取主要内容
            content = ""
            main_selectors = ['main', 'article', '.content', '#content', '.post', '.entry']
            
            for selector in main_selectors:
                # 在每次选择器检查前检查停止标志
                if session_id and self._stop_flags.get(session_id, False):
                    result["content"] = ""
                    result["content_error"] = "用户停止了搜索"
                    return result
                    
                main_content = soup.select_one(selector)
                if main_content:
                    content = main_content.get_text(strip=True, separator=' ')
                    break
            else:
                # 如果找不到主要内容区域，提取body内容
                body = soup.find('body')
                if body:
                    content = body.get_text(strip=True, separator=' ')
            
            # 清理和限制内容长度
            content = re.sub(r'\s+', ' ', content)  # 合并多个空白字符
            content = content[:3000]  # 限制长度
            
            result["content"] = content
            result["page_title"] = soup.title.string if soup.title else result.get("title", "")
            
        except Exception as e:
            self.logger.warning(f"抓取页面内容失败 {url}: {e}")
            result["content"] = ""
            result["content_error"] = str(e)
        
        return result

    async def _analyze_with_llm(self, query: str, enriched_results: List[Dict], session_id: str = None) -> Dict:
        """
        使用LLM分析搜索结果和内容
        
        Args:
            query: 原始查询
            enriched_results: 包含内容的搜索结果
            session_id: 会话ID，用于停止控制
            
        Returns:
            LLM分析结果
        """
        try:
            # 检查停止标志
            if session_id and self._stop_flags.get(session_id, False):
                return {
                    "summary": "用户停止了搜索，分析被中断",
                    "key_points": [],
                    "analysis": "搜索被用户停止",
                    "recommendations": []
                }
            
            # 构建分析提示词
            prompt = self._build_analysis_prompt(query, enriched_results)
            
            # 调用LLM进行分析 - 使用流式版本以便更好地响应停止
            if hasattr(self.llm, 'chat_stream'):
                llm_response = ""
                async for chunk in self.llm.chat_stream(prompt, session_id=session_id):
                    # 在每个chunk后检查停止标志
                    if session_id and self._stop_flags.get(session_id, False):
                        return {
                            "summary": "用户停止了搜索，分析被中断",
                            "key_points": [],
                            "analysis": "搜索被用户停止",
                            "recommendations": []
                        }
                    llm_response += chunk
            else:
                # 回退到异步版本
                llm_response = await self.llm.chat(prompt)
            
            # 解析LLM响应
            return self._parse_llm_response(llm_response)
            
        except Exception as e:
            self.logger.error(f"LLM分析失败: {e}")
            return {
                "summary": "LLM分析失败，但已获取网页内容",
                "key_points": [],
                "analysis": f"分析过程中出错: {str(e)}",
                "recommendations": []
            }

    def _build_analysis_prompt(self, query: str, enriched_results: List[Dict]) -> str:
        """Assemble the Chinese-language analysis prompt for the LLM.

        Args:
            query: the user's original search query.
            enriched_results: search hits, possibly carrying "snippet",
                "content" or "content_error" fields.

        Returns:
            The full prompt: instructions, per-result sections (content
            capped at 1000 chars each), and the required answer template.
        """
        lines = [
            f"请分析以下关于'{query}'的搜索结果和网页内容，提供综合性的总结和分析。并且你目前是我本地部署的ai，时间日期之类的信息以搜索结果为准，不需要多余的回答，比如有几条结果不可用之类的",
            "",
            "搜索结果和内容：",
            ""
        ]

        for idx, hit in enumerate(enriched_results, 1):
            lines.append(f"【结果 {idx}】")
            lines.append(f"标题: {hit.get('title', '无标题')}")
            lines.append(f"URL: {hit.get('url', '无URL')}")

            snippet = hit.get('snippet')
            if snippet:
                lines.append(f"摘要: {snippet}")

            body = hit.get('content')
            if body:
                # Cap per-result content so the prompt stays bounded.
                lines.append(f"内容: {body[:1000]}")
            elif hit.get('content_error'):
                lines.append(f"内容获取失败: {hit['content_error']}")

            lines.append("")

        # Fixed answer template the parser on the other side expects.
        lines.extend([
            "请按以下格式提供分析：",
            "",
            "## 综合总结",
            "[提供200-300字的综合总结]",
            "",
            "## 关键要点",
            "1. [要点1]",
            "2. [要点2]",
            "3. [要点3]",
            "",
            "## 详细分析",
            "[提供更详细的分析，包括不同来源的观点对比]",
            "",
            "## 建议和推荐",
            "1. [建议1]",
            "2. [建议2]",
            "",
            "请确保分析客观、准确，并突出最重要的信息。"
        ])

        return "\n".join(lines)

    def _parse_llm_response(self, llm_response: str) -> Dict:
        """Parse the LLM's sectioned answer into structured fields.

        Splits the response on the section headings (综合总结 / 关键要点 /
        详细分析 / 建议和推荐) and returns a dict with "summary" (str),
        "key_points" (list), "analysis" (str) and "recommendations" (list).
        Falls back to using the whole response when no heading is found.

        Fixes vs. previous version: enumerated items are cleaned with a
        leading-enumerator regex (the old `strip('1234567890. ')` also ate
        trailing digits/periods, corrupting items like "版本 3.12"), and
        list sections are list-ified uniformly regardless of the order in
        which headings appear.
        """
        try:
            sections: Dict = {
                "summary": "",
                "key_points": [],
                "analysis": "",
                "recommendations": []
            }
            # Sections whose content is an enumerated list rather than prose.
            list_sections = ("key_points", "recommendations")
            # Remove only a LEADING "1. " / "2、" style enumerator.
            enum_prefix = re.compile(r'^\s*\d+\s*[.、)）]?\s*')

            def flush(section, content):
                # Store accumulated lines into their section; enumerated
                # sections become cleaned lists, prose sections are joined.
                if not section or not content:
                    return
                if section in list_sections:
                    sections[section] = [enum_prefix.sub('', item) for item in content if item.strip()]
                else:
                    sections[section] = '\n'.join(content)

            # Heading markers in priority order (substring match, so both
            # "## 综合总结" and bare "综合总结" lines switch sections).
            headings = (
                ('综合总结', 'summary'),
                ('关键要点', 'key_points'),
                ('详细分析', 'analysis'),
                ('建议和推荐', 'recommendations'),
                ('## 建议', 'recommendations'),
            )

            current_section = None
            current_content = []

            for raw_line in llm_response.split('\n'):
                line = raw_line.strip()
                matched = next((sec for marker, sec in headings if marker in line), None)
                if matched:
                    flush(current_section, current_content)
                    current_section = matched
                    current_content = []
                elif line and not line.startswith('#'):
                    current_content.append(line)

            # Flush whatever the last section accumulated.
            flush(current_section, current_content)

            # No headings recognized: treat everything as analysis text.
            if not any(sections.values()):
                sections['analysis'] = llm_response
                sections['summary'] = llm_response[:300] + "..." if len(llm_response) > 300 else llm_response

            return sections

        except Exception as e:
            self.logger.error(f"解析LLM响应失败: {e}")
            return {
                "summary": llm_response[:300] + "..." if len(llm_response) > 300 else llm_response,
                "key_points": [],
                "analysis": llm_response,
                "recommendations": []
            }

    def stop_generation(self, session_id: str) -> bool:
        """停止指定会话的生成"""
        try:
            if session_id:
                self._stop_flags[session_id] = True
                # 同时停止基础浏览器工具
                browser_stopped = True
                if hasattr(self.browser_tool, 'stop_generation'):
                    browser_stopped = self.browser_tool.stop_generation(session_id)
                return browser_stopped
            return True
        except Exception as e:
            self.logger.error(f"停止增强浏览器生成失败: {e}")
            return False

    def stop_all_generations(self):
        """Raise the stop flag for every known session, then tell the base tool."""
        # fromkeys builds the replacement mapping before mutation, so we
        # never modify the dict while iterating it.
        self._stop_flags.update(dict.fromkeys(self._stop_flags, True))
        # Propagate to the wrapped tool when it supports bulk stopping.
        if hasattr(self.browser_tool, 'stop_all_generations'):
            self.browser_tool.stop_all_generations()

    def clear_stop_flag(self, session_id: str):
        """Drop the stop flag for a session, here and in the base tool."""
        # pop with a default is a no-op for unknown sessions.
        self._stop_flags.pop(session_id, None)
        # Clear the wrapped tool's flag as well when supported.
        if hasattr(self.browser_tool, 'clear_stop_flag'):
            self.browser_tool.clear_stop_flag(session_id)