import asyncio
import aiohttp
import json
from typing import List, Dict, Any, Optional
from bs4 import BeautifulSoup
import re
from urllib.parse import urljoin, urlparse
import os

class WebSearchTool:
    """
    Web search tool with pluggable search engines.

    Intended usage is as an async context manager so the underlying
    aiohttp session is opened and closed deterministically::

        async with WebSearchTool() as tool:
            results = await tool.search("query")
    """

    def __init__(self, search_engine: str = "duckduckgo", max_results: int = 10):
        # Which backend to use ("duckduckgo" or "bing"); any other value
        # falls back to DuckDuckGo in search().
        self.search_engine = search_engine
        # Default cap on result count when a call does not override it.
        self.max_results = max_results
        # Lazily created HTTP session. The annotation is a quoted forward
        # reference so the class can be defined and instantiated even when
        # aiohttp is not importable at annotation-evaluation time.
        self.session: Optional["aiohttp.ClientSession"] = None
        # True when this object created the session itself (and therefore
        # owns closing it); False when a caller attached one externally.
        self._owns_session = False

    async def __aenter__(self):
        # Only create a session when none is active. The previous
        # implementation unconditionally overwrote self.session, leaking
        # any session already created by fetch_content().
        if self.session is None or self.session.closed:
            self.session = aiohttp.ClientSession()
            self._owns_session = True
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Close only a session this object created itself.
        if self.session is not None and self._owns_session:
            await self.session.close()
            self.session = None
            self._owns_session = False

    async def search(self, query: str, max_results: Optional[int] = None) -> List[Dict[str, Any]]:
        """
        Run a web search.

        Args:
            query: Search query string.
            max_results: Maximum number of results; defaults to the
                instance-level ``max_results``.

        Returns:
            List of result dicts, each containing ``title``, ``url``,
            ``snippet`` and ``source``.
        """
        max_results = max_results or self.max_results

        if self.search_engine == "bing":
            return await self._search_bing(query, max_results)
        # "duckduckgo" and any unrecognized engine both use DuckDuckGo.
        return await self._search_duckduckgo(query, max_results)

    async def _search_duckduckgo(self, query: str, max_results: int) -> List[Dict[str, Any]]:
        """
        Search via the optional ``duckduckgo-search`` package.

        Falls back to ``_basic_web_search`` when the package is missing
        or the search raises (best-effort behaviour, as before).
        """
        try:
            from duckduckgo_search import DDGS
        except ImportError:
            # Package not installed -> fall back to the basic search.
            return await self._basic_web_search(query, max_results)

        try:
            # DDGS.text() performs blocking network I/O; run it in a
            # worker thread so it does not stall the event loop.
            loop = asyncio.get_running_loop()
            search_results = await loop.run_in_executor(
                None, lambda: list(DDGS().text(query, max_results=max_results))
            )
            return [
                {
                    "title": item.get("title", ""),
                    "url": item.get("href", ""),
                    "snippet": item.get("body", ""),
                    "source": "duckduckgo",
                }
                for item in search_results
            ]
        except Exception as e:
            # Deliberate best-effort: log and fall back rather than raise.
            print(f"DuckDuckGo搜索失败: {e}")
            return await self._basic_web_search(query, max_results)

    async def _basic_web_search(self, query: str, max_results: int) -> List[Dict[str, Any]]:
        """
        Basic web search returning mock results.

        A real deployment should replace this with an actual search API;
        it exists so the tool degrades gracefully without dependencies.
        """
        mock_results = [
            {
                "title": f"{query} - 维基百科",
                "url": f"https://zh.wikipedia.org/wiki/{query.replace(' ', '_')}",
                "snippet": f"这是关于{query}的维基百科页面，包含详细的基础信息和相关概念。",
                "source": "wikipedia"
            },
            {
                "title": f"{query}的最新研究进展",
                "url": f"https://example.com/research/{query.replace(' ', '-')}",
                "snippet": f"这篇最新的研究论文深入探讨了{query}的前沿发展和未来趋势。",
                "source": "research_paper"
            },
            {
                "title": f"{query} - 行业分析报告",
                "url": f"https://example.com/industry/{query.replace(' ', '-')}-analysis",
                "snippet": f"这份行业分析报告提供了{query}的市场现状、竞争格局和发展前景。",
                "source": "industry_report"
            }
        ]

        return mock_results[:max_results]

    async def _search_bing(self, query: str, max_results: int) -> List[Dict[str, Any]]:
        """
        Bing search (requires a Bing Search API key).

        Not implemented yet: delegates to the basic search until the
        Bing Search API is integrated.
        """
        return await self._basic_web_search(query, max_results)

    async def fetch_content(self, url: str) -> Optional[str]:
        """
        Fetch a web page and return its extracted text content.

        Args:
            url: Page URL.

        Returns:
            Extracted page text, or None when the request fails or the
            server responds with a non-200 status.
        """
        if not self.session:
            # Lazily create a session; mark it as owned so a surrounding
            # `async with` will close it.
            self.session = aiohttp.ClientSession()
            self._owns_session = True

        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
            }

            # Explicit ClientTimeout: passing a bare number to `timeout=`
            # is deprecated in aiohttp.
            timeout = aiohttp.ClientTimeout(total=30)
            async with self.session.get(url, headers=headers, timeout=timeout) as response:
                if response.status == 200:
                    html = await response.text()
                    return self._extract_text_from_html(html)
                print(f"无法获取网页内容: {url}, 状态码: {response.status}")
                return None

        except Exception as e:
            # Best-effort: callers treat None as "content unavailable".
            print(f"获取网页内容失败: {url}, 错误: {e}")
            return None

    def _extract_text_from_html(self, html: str) -> str:
        """
        Extract readable text from an HTML document.

        Strips <script>/<style> elements, collapses whitespace, and
        truncates the result to 5000 characters.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')

            # Drop script and style elements entirely.
            for tag in soup(["script", "style"]):
                tag.decompose()

            raw_text = soup.get_text()

            # Normalize whitespace: strip each line, then split on runs of
            # double spaces (layout gaps) and rejoin the non-empty pieces.
            stripped_lines = (line.strip() for line in raw_text.splitlines())
            chunks = (piece.strip() for line in stripped_lines for piece in line.split("  "))
            text = ' '.join(chunk for chunk in chunks if chunk)

            # Cap length so downstream consumers get a bounded payload.
            max_length = 5000
            if len(text) > max_length:
                text = text[:max_length] + "..."

            return text

        except Exception as e:
            print(f"HTML解析失败: {e}")
            return ""

    async def search_and_fetch(self, query: str, max_results: int = 5) -> List[Dict[str, Any]]:
        """
        Search, then download the content of each result page.

        Args:
            query: Search query string.
            max_results: Maximum number of results.

        Returns:
            Search results enriched with ``content`` (page text, or the
            snippet when fetching failed) and ``fetched`` (bool).
        """
        search_results = await self.search(query, max_results)

        enriched_results = []

        async with self:
            for result in search_results:
                # Skip the HTTP round-trip entirely when the result has no
                # URL; the outcome (snippet as content) is the same.
                url = result.get("url")
                content = await self.fetch_content(url) if url else None

                enriched_results.append({
                    **result,
                    "content": content or result["snippet"],
                    "fetched": content is not None,
                })

                # Throttle so we do not hammer the target servers.
                await asyncio.sleep(1)

        return enriched_results