#!/usr/bin/env python3
"""
百度网站链接过滤演示
展示优化后的链接过滤效果
"""
import os
import sys
import json
from pathlib import Path

# 添加项目根目录到Python路径
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))

from tools.web_search import WebSearchTool
from bs4 import BeautifulSoup

def extract_links(html_content):
    """Collect every anchor's target and label from an HTML document.

    Args:
        html_content: Raw HTML string to parse.

    Returns:
        A list of dicts, each with 'href' (the link target) and 'text'
        (the anchor's stripped text, or '(无文本)' when it is empty).
        Anchors without an href (or with an empty one) are skipped.
    """
    parsed = BeautifulSoup(html_content, 'html.parser')
    # Empty stripped text falls back to the placeholder via `or`,
    # matching the original "text if text else ..." behavior.
    return [
        {
            'href': anchor.get('href'),
            'text': anchor.text.strip() or '(无文本)',
        }
        for anchor in parsed.find_all('a')
        if anchor.get('href')
    ]

def main():
    """Run the Baidu link-filtering demo.

    Fetches https://www.baidu.com twice — once with ContentExtractor's
    link filtering monkeypatched off, once with it active — then writes
    both link lists to JSON files and prints a before/after comparison.

    Returns:
        int: 0 on success, 1 on any error (the error is printed).
    """
    # 创建输出目录
    output_dir = Path('./results/baidu_demo')
    output_dir.mkdir(parents=True, exist_ok=True)

    url = 'https://www.baidu.com'
    print(f"正在演示百度网站链接过滤功能: {url}")

    try:
        # 1. 使用body模式不过滤链接（先禁用过滤功能来展示对比）
        search_tool_unfiltered = WebSearchTool(
            headless=True,
            output_dir=str(output_dir),
            output_format='markdown',
            extract_mode='body'
        )

        # 临时禁用链接过滤 (monkeypatch the class method).
        from tools.content_extractor import ContentExtractor
        original_clean_links = ContentExtractor._clean_invalid_links
        # NOTE(review): the stub returns None; this assumes callers of
        # _clean_invalid_links ignore its return value — confirm against
        # ContentExtractor before relying on this demo pattern.
        ContentExtractor._clean_invalid_links = lambda self, content: None
        try:
            # 搜索网页获取未过滤结果
            result_unfiltered = search_tool_unfiltered.search(url)
        finally:
            # 恢复链接过滤功能 — must happen even if search() raises,
            # otherwise the patched class state leaks to later callers.
            ContentExtractor._clean_invalid_links = original_clean_links

        html_unfiltered = result_unfiltered.get('raw_html', '')
        links_unfiltered = extract_links(html_unfiltered)

        # 2. 使用过滤功能
        search_tool_filtered = WebSearchTool(
            headless=True,
            output_dir=str(output_dir),
            output_format='markdown',
            extract_mode='body'
        )

        # 搜索网页获取过滤后结果
        result_filtered = search_tool_filtered.search(url)
        html_filtered = result_filtered.get('raw_html', '')
        links_filtered = extract_links(html_filtered)

        # 保存结果 (both link lists, pretty-printed, non-ASCII preserved)
        with open(output_dir / 'links_unfiltered.json', 'w', encoding='utf-8') as f:
            json.dump(links_unfiltered, f, ensure_ascii=False, indent=2)

        with open(output_dir / 'links_filtered.json', 'w', encoding='utf-8') as f:
            json.dump(links_filtered, f, ensure_ascii=False, indent=2)

        # 输出比较结果
        print(f"\n链接过滤效果:")
        print(f"过滤前链接数: {len(links_unfiltered)}")
        print(f"过滤后链接数: {len(links_filtered)}")
        print(f"过滤掉了 {len(links_unfiltered) - len(links_filtered)} 个无效链接")

        # 显示过滤后的有效链接示例
        print(f"\n过滤后的有效链接示例:")
        for i, link in enumerate(links_filtered[:10], 1):
            print(f"{i}. {link['text']} -> {link['href']}")

        print(f"\n结果已保存到:")
        print(f"- {output_dir / 'links_unfiltered.json'}")
        print(f"- {output_dir / 'links_filtered.json'}")

        return 0
    except Exception as e:
        # Broad catch is acceptable at this script's top-level boundary;
        # report and signal failure via the exit code.
        print(f"错误: {str(e)}")
        return 1

if __name__ == "__main__":
    sys.exit(main()) 