import requests
import random
import time
import json
from bs4 import BeautifulSoup
import asyncio
from pyppeteer import launch
import os
import sys
from dotenv import load_dotenv
import warnings
import atexit

# Add the parent directory to the Python path so sibling packages
# (e.g. `tools`) resolve when this file is executed directly as a script.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Absolute import (instead of a relative one) so the module works both as
# part of the package and when run standalone.
from tools.base import Tool

# Load environment variables (CHROME_PATH, ZHIHU_COOKIES, ...) from a .env file.
load_dotenv()

# Silence the runtime warnings emitted when pyppeteer coroutines are
# garbage-collected during interpreter shutdown.
warnings.filterwarnings("ignore", message="coroutine '.*' was never awaited")
warnings.filterwarnings("ignore", message="Enable tracemalloc to get the object allocation traceback")

# 重写atexit回调函数来忽略事件循环关闭错误
def cleanup_handler():
    """Best-effort atexit hook: close the default event loop if still open.

    Registered below so pyppeteer's lingering loop does not print
    "event loop is closed" errors at interpreter shutdown; every failure
    here is deliberately swallowed.
    """
    try:
        current_loop = asyncio.get_event_loop()
        if current_loop.is_closed():
            return
        current_loop.close()
    except Exception:
        return


atexit.register(cleanup_handler)

def get_random_agent():
    """Return one desktop User-Agent string picked uniformly at random.

    Rotating the UA makes the plain HTTP search requests look less like a
    single automated client.
    """
    ua_pool = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
    )
    return random.choice(ua_pool)

async def search_zhihu(query: str) -> list:
    """Search Zhihu for *query* using a headless Chrome driven by pyppeteer.

    Instead of scraping the rendered DOM, this navigates to the Zhihu search
    page and intercepts the `api/v4/search_v3` XHR the page itself issues,
    then parses that JSON payload.

    Returns a list of dicts with keys 'title', 'content' and 'url';
    returns an empty list on any failure (missing Chrome binary, navigation
    timeout, no API response within 10s, blocked request, ...).
    """
    browser = None
    try:
        print("正在启动浏览器...")
        # Chrome binary location; overridable via the CHROME_PATH env var.
        chrome_path = os.getenv("CHROME_PATH", "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe")
        print(f"Chrome路径: {chrome_path}")

        if not os.path.exists(chrome_path):
            print(f"错误: Chrome浏览器路径不存在")
            return []

        # Launch flags chosen to look like a regular desktop browser and to
        # run reliably in sandboxed / low-shared-memory environments.
        browser = await launch(
            executablePath=chrome_path,
            headless=True,
            args=[
                '--no-sandbox',
                '--disable-setuid-sandbox',
                '--disable-blink-features=AutomationControlled',
                '--disable-infobars',
                '--window-size=1920,1080',
                '--start-maximized',
                '--disable-gpu',
                '--disable-dev-shm-usage'
            ],
            ignoreHTTPSErrors=True,
            userDataDir='./user_data'
        )

        print("浏览器启动成功，正在创建新页面...")
        page = await browser.newPage()

        # Present more realistic browser characteristics: a fixed modern UA
        # string and a desktop-sized viewport.
        await page.setUserAgent('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36')
        await page.setViewport({'width': 1920, 'height': 1080})

        # Inject JS that masks the webdriver/headless fingerprint; it runs
        # before any page script executes.
        await page.evaluateOnNewDocument('''
            Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined
            });
            Object.defineProperty(navigator, 'plugins', {
                get: () => [1, 2, 3, 4, 5]
            });
        ''')

        api_data = None            # first error-free search_v3 payload
        done = asyncio.Event()     # signalled once api_data has been captured

        async def intercept_response(response):
            # Response hook: keep the first search_v3 payload without an
            # 'error' key and wake the waiter below.
            nonlocal api_data
            if 'api/v4/search_v3?' in response.url:
                try:
                    print(f"捕获到API响应: {response.url}")
                    data = await response.json()
                    if 'error' not in data:
                        api_data = data
                        done.set()
                except Exception as e:
                    print(f"处理API响应时出错: {str(e)}")

        # page.on expects a synchronous callback, so schedule the coroutine
        # on the running loop ourselves.
        page.on('response', lambda res: asyncio.ensure_future(intercept_response(res)))

        # Authenticate by replaying browser cookies from the ZHIHU_COOKIES
        # env var (a standard "name=value; name=value" cookie header string).
        zhihu_cookies = os.getenv("ZHIHU_COOKIES")
        if zhihu_cookies:
            print("正在设置知乎cookies...")
            cookies = []
            for cookie_part in zhihu_cookies.split('; '):
                try:
                    name, value = cookie_part.split('=', 1)
                    cookies.append({
                        'name': name,
                        'value': value,
                        'domain': '.zhihu.com',
                        'path': '/'
                    })
                except ValueError:
                    # Skip malformed fragments without an '=' separator.
                    continue

            if cookies:
                await page.setCookie(*cookies)
                print(f"成功设置 {len(cookies)} 个cookies")

        search_url = f'https://www.zhihu.com/search?type=content&q={query}'
        print(f"正在访问搜索页面: {search_url}")
        await page.goto(search_url, {
            'waitUntil': 'networkidle0',
            'timeout': 30000
        })

        try:
            print("等待API响应...")
            # The response hook fires asynchronously; give it up to 10s.
            await asyncio.wait_for(done.wait(), timeout=10)
            print("成功接收到API响应")
        except asyncio.TimeoutError:
            print("等待API响应超时")
            return []

        # Flatten the API payload into simple result dicts.  Question-type
        # entries get a canonical question URL; other types fall back to the
        # object's own 'url' field.
        results = []
        if api_data and 'data' in api_data:
            print(f"找到 {len(api_data['data'])} 条原始结果")
            for item in api_data['data']:
                if 'object' in item:
                    obj = item['object']
                    title = obj.get('title', obj.get('question', {}).get('title', ''))
                    content = obj.get('excerpt', obj.get('description', ''))
                    url = f"https://www.zhihu.com/question/{obj['question']['id']}" if 'question' in obj else obj.get('url', '')

                    if title and url:
                        results.append({
                            'title': title,
                            'content': content,
                            'url': url,
                        })
            print(f"成功解析 {len(results)} 条有效结果")
        else:
            print("API数据格式不正确或为空")

        return results

    except Exception as e:
        print(f"搜索过程发生错误: {str(e)}")
        return []

    finally:
        # Always shut the browser down, even on error/timeout paths.
        if browser:
            try:
                print("正在关闭浏览器...")
                await browser.close()
            except Exception as e:
                print(f"关闭浏览器时出错: {str(e)}")

def search_web(query: str, num_results: int = 5, engines: str = 'bing,yahoo') -> dict:
    """Search the web via the remote search API and/or the Zhihu scraper.

    Args:
        query: Search terms.
        num_results: Maximum number of results to return.
        engines: Comma-separated engine names; 'zhihu' is handled locally
            with the headless browser, the rest are forwarded to the API.

    Returns:
        dict with keys 'success' (bool), 'results' (list of result dicts,
        capped at num_results) and 'error' (str, or None when there are
        results).
    """
    results = []
    error_msg = None

    try:
        # Parse the engine list up front.  The previous str.replace approach
        # left stray commas ('bing,zhihu,yahoo' -> 'bing,,yahoo' sent to the
        # API) and the substring test matched any engine merely containing
        # 'zhihu'.
        engine_list = [e.strip() for e in engines.split(',') if e.strip()]

        # Zhihu is scraped locally rather than going through the API.
        if 'zhihu' in engine_list:
            zhihu_results = asyncio.run(search_zhihu(query))
            results.extend(zhihu_results)
            engine_list = [e for e in engine_list if e != 'zhihu']

        # Forward the remaining engines to the HTTP search API.
        if engine_list:
            headers = {
                'User-Agent': get_random_agent(),
                'Accept': 'application/json'
            }
            params = {
                'q': query,
                'engines': ','.join(engine_list)
            }

            url = 'https://luo-search.netlify.app/.netlify/functions/search'
            response = requests.get(url, params=params, headers=headers, timeout=10)
            response.raise_for_status()
            data = response.json()

            if isinstance(data, dict) and data.get('status') == 'success':
                search_results = data.get('data', {}).get('results', [])
                for item in search_results:
                    # Stop once we already have enough results (including any
                    # Zhihu hits collected above).
                    if len(results) >= num_results:
                        break

                    title = item.get('title', '').strip()
                    link = item.get('link', '').strip()
                    snippet = item.get('description', '暂无描述').strip()

                    # Skip entries missing a title or link.
                    if title and link:
                        results.append({
                            'title': title,
                            'url': link,
                            'description': snippet,
                            'source': item.get('source', '')
                        })
            else:
                error_msg = "API返回格式错误"

    except Exception as e:
        error_msg = f"搜索出错: {str(e)}"

    return {
        'success': len(results) > 0,
        'results': results[:num_results],
        # Only report an error when nothing at all was found.
        'error': error_msg if not results else None
    }

def execute(params: dict) -> str:
    """Entry point for the search_web tool.

    Reads 'query' (required), 'num_results' and 'engines' from *params*,
    retries once on an empty result set, and renders the hits as a
    Markdown-formatted list.
    """
    try:
        engines = params.get('engines', 'bing,yahoo')
        query = params.get("query")
        if not query:
            return "错误：未提供搜索查询"

        num_results = int(params.get("num_results", "5"))
        print(f"\n搜索：{query}")

        results = []
        attempts_left = 2
        while attempts_left > 0 and not results:
            outcome = search_web(query, num_results, engines)
            if outcome['success']:
                results = outcome['results']
            if results:
                break
            attempts_left -= 1
            if attempts_left > 0:
                print(f"\n未找到结果，正在重试... (剩余{attempts_left}次)")
                time.sleep(1)

        if not results:
            return "未找到相关内容，建议:\n1. 调整搜索关键词\n2. 稍后重试"

        # Assemble the Markdown report with a join instead of repeated +=.
        parts = [f"为您找到 {len(results)} 条相关内容：\n\n"]
        for idx, hit in enumerate(results, 1):
            parts.append(f"{idx}. **{hit['title']}**\n")
            parts.append(f"   - [链接]({hit['url']})\n")
            parts.append(f"   - 描述: {hit['description']}\n")
            if hit.get('source'):
                parts.append(f"   - 来源: {hit['source']}\n")
            parts.append("\n")
        parts.append("\n提示：使用Ctrl+点击或右键在新标签页中打开链接。")
        return "".join(parts)

    except Exception as e:
        return f"搜索过程出错：{str(e)}"

# Tool registration: metadata record consumed by the framework's dispatcher
# (see tools.base.Tool) — name, parameter schema, an XML-style invocation
# example, and `execute` bound as the handler.
TOOL = Tool(
    name="search_web",
    description="搜索网页内容，支持使用Bing、Yahoo和知乎搜索引擎",
    parameters={
        "query": {
            "type": "string",
            "description": "搜索查询词",
            "required": True
        },
        "num_results": {
            "type": "integer",
            "description": "返回结果数量(默认5)",
            "required": False
        },
        "engines": {
            "type": "string",
            "description": "使用的搜索引擎（默认为bing,yahoo）",
            "required": False
        }
    },
    example='''<search_web>
<query>ChatGPT最新进展</query>
<num_results>5</num_results>
<engines>bing,yahoo,zhihu</engines>
</search_web>''',
    execute_func=execute
)

# Manual smoke test: run this module directly to exercise the Zhihu scraper.
if __name__ == "__main__":
    load_dotenv()

    async def test():
        """Check prerequisites, then run one Zhihu search and dump the hits."""
        try:
            chrome_path = os.getenv("CHROME_PATH", "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe")
            if not os.path.exists(chrome_path):
                print(f"错误: Chrome浏览器路径不存在: {chrome_path}")
                return

            if not os.getenv("ZHIHU_COOKIES"):
                print("错误: 未设置知乎cookies")
                return

            results = await search_zhihu("Python教程")

            print(f"\n找到 {len(results)} 条结果:")
            if not results:
                print("未找到任何结果")
            else:
                print(json.dumps(results, ensure_ascii=False, indent=2))

        except Exception as e:
            print(f"测试出错: {str(e)}")

    asyncio.run(test())