# new_web_search.py
from mcp.server.fastmcp import FastMCP
import sys
import logging
import configparser
import requests
import time
import random
import threading
from bs4 import BeautifulSoup
from urllib.parse import quote, urljoin
import re

# 配置日志
logger = logging.getLogger('WebSearcher')
logging.basicConfig(
    level=logging.INFO, 
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.FileHandler('search_log.log'), logging.StreamHandler()]  # 同时输出到文件和控制台
)

# 修复Windows控制台编码
if sys.platform == 'win32':
    sys.stderr.reconfigure(encoding='utf-8')
    sys.stdout.reconfigure(encoding='utf-8')

def clean_config_value(value):
    """Strip an inline '#' comment and surrounding whitespace from a raw
    config value.

    Falsy input (None, "") is returned unchanged so callers can pass
    missing values through safely.
    """
    if not value:
        return value
    uncommented, _, _ = value.partition('#')
    return uncommented.strip()

# Read the configuration file (missing file is tolerated: config.read()
# silently yields an empty parser, so all fallbacks below apply).
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf-8')

# Professional-site registry (URLs cleaned to drop inline comments and
# reject anything that is not an absolute http(s) link).
PROFESSIONAL_SITES = {}
if config.has_section('ProfessionalSites'):
    for site_name, site_url in config.items('ProfessionalSites'):
        clean_url = clean_config_value(site_url)
        if clean_url.startswith(('http://', 'https://')):  # validate URL scheme
            PROFESSIONAL_SITES[site_name] = clean_url
else:
    # Default professional sites used when no [ProfessionalSites] section exists.
    PROFESSIONAL_SITES = {
        '北京公共资源交易中心': 'https://ggzyfw.beijing.gov.cn/',
        '国家图书馆': 'http://www.nlc.cn/',
        '中国知网': 'https://www.cnki.net/'
    }

# Search tuning knobs, read from [SearchConfig] with hard-coded fallbacks.
# clean_config_value() strips inline comments so int()/float() don't choke.
SEARCH_CONFIG = {
    'max_results': int(clean_config_value(config.get('SearchConfig', 'max_results', fallback='5'))),
    'max_pages': int(clean_config_value(config.get('SearchConfig', 'max_pages', fallback='1'))),
    'max_execution_time': int(clean_config_value(config.get('SearchConfig', 'max_execution_time', fallback='40'))),
    'request_delay_min': float(clean_config_value(config.get('SearchConfig', 'request_delay_min', fallback='0.2'))),
    'request_delay_max': float(clean_config_value(config.get('SearchConfig', 'request_delay_max', fallback='0.5')))
}

# Create the MCP server instance; tools are registered via @mcp.tool() below.
mcp = FastMCP("WebSearcher")

def get_random_user_agent():
    """Return one desktop-browser User-Agent string chosen at random,
    to lower the chance of being blocked by Baidu."""
    candidates = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/119.0 Safari/537.36',
    )
    return random.choice(candidates)

def fetch_url(url, timeout=8):
    """Fetch *url* and return the decoded response body, or None on failure.

    Makes up to 2 attempts with a short linear backoff between them.

    Args:
        url: Absolute URL to request.
        timeout: Per-request timeout in seconds.

    Returns:
        Response text on success, None after both attempts fail.
    """
    headers = {
        'User-Agent': get_random_user_agent(),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }
    
    for attempt in range(2):
        try:
            response = requests.get(url, headers=headers, timeout=timeout, allow_redirects=True)
            response.raise_for_status()
            # apparent_encoding sniffs the charset from the body, which is more
            # reliable than the (often missing/wrong) HTTP header on Chinese sites.
            response.encoding = response.apparent_encoding
            logger.info(f"成功获取URL: {url}")
            return response.text
        except requests.exceptions.RequestException as e:
            # Timeout is a RequestException subclass, so timeouts now get the
            # same retry-with-backoff as other transient errors. (The previous
            # code returned None on the very first timeout even though its log
            # message claimed "attempt 1/2".)
            logger.warning(f"URL {url} 获取失败（尝试 {attempt+1}/2）: {str(e)}")
            time.sleep(0.5 * (attempt + 1))
    logger.error(f"URL {url} 多次获取失败，放弃")
    return None

def is_advertisement(element):
    """Lightweight heuristic: does this parsed element look like an ad?

    Two cheap checks: an ad keyword appearing in the first 100 chars of the
    element's text, or a literal 'ad' among its CSS classes / as its id.
    Nodes that lack get_text()/get() (e.g. plain strings) yield False.

    Args:
        element: A BeautifulSoup node, or None.

    Returns:
        True if the element looks like an advertisement, False otherwise.
    """
    if element is None:
        return False
    
    # Core ad keywords only; plain substring match keeps this fast.
    ad_keywords = ['广告', '推广', 'sponsored', 'ad']
    try:
        element_text = element.get_text().lower()[:100]
        for keyword in ad_keywords:
            if keyword in element_text:
                return True
    except (AttributeError, TypeError):
        # Node without a usable get_text(); fall through to the attribute check.
        # (Narrowed from a bare `except:`, which also swallowed KeyboardInterrupt.)
        pass
    
    # Simplified class/id check: 'ad' as an exact class token or as the id value.
    try:
        if 'ad' in (element.get('class', []) + [element.get('id', '')]):
            return True
    except (AttributeError, TypeError):
        pass
    
    return False

def extract_main_content(html):
    """Parse *html* and return up to 2000 chars of whitespace-normalized body text.

    Removes <script>/<style>/<iframe> tags and up to 50 ad-looking elements
    before extracting the text.

    Args:
        html: Raw HTML string, or None/"".

    Returns:
        Cleaned text (possibly empty); "" for empty input or on parse failure.
    """
    if not html:
        return ""
    
    try:
        soup = BeautifulSoup(html, 'lxml')
    except Exception as e:
        logger.error(f"HTML解析失败: {str(e)}")
        return ""
    
    # Strip only the noisiest non-content tags.
    for tag in soup(['script', 'style', 'iframe']):
        try:
            tag.decompose()
        except Exception:
            continue
    
    body = soup.body
    if body is None:
        # Fragment without a <body>: nothing meaningful to extract.
        # (Previously this surfaced as an AttributeError swallowed by a bare except.)
        return ""
    
    # Best-effort ad removal, capped at 50 elements to stay fast.
    try:
        for element in body.find_all(limit=50):
            if is_advertisement(element):
                element.decompose()
    except Exception:
        pass
    
    # Collapse whitespace runs and cap the result at 2000 source chars.
    try:
        text = body.get_text()[:2000]
        return re.sub(r'\s+', ' ', text).strip()
    except Exception:
        return ""

def baidu_search(query, start_time):
    """Scrape Baidu web search for *query* and return lightweight results.

    Pages through Baidu result pages (bounded by max_pages / max_results in
    SEARCH_CONFIG) with a randomized delay between pages, and bails out as
    soon as the shared max_execution_time budget is exhausted.

    Args:
        query: Search keywords; URL-encoded before the request.
        start_time: time.time() stamp of when the overall operation began,
            used for the global timeout budget.

    Returns:
        List of dicts: {'title': <=50 chars, 'url': str, 'abstract': <=100 chars}.
        May be shorter than max_results (or empty) on timeout/fetch failure.
    """
    logger.info(f"开始百度搜索（关键词：{query}）")
    results = []
    page = 0
    max_results = SEARCH_CONFIG['max_results']
    max_exec = SEARCH_CONFIG['max_execution_time']
    
    while len(results) < max_results and page < SEARCH_CONFIG['max_pages']:
        # Strict timeout control: return whatever was collected so far.
        if time.time() - start_time > max_exec:
            logger.warning(f"搜索超时（已用{time.time()-start_time:.1f}秒），返回{len(results)}条结果")
            return results
        
        # Baidu search URL; `pn` is the zero-based result offset (10 per page).
        url = f"https://www.baidu.com/s?wd={quote(query)}&pn={page*10}&ie=utf-8"
        html = fetch_url(url)
        if not html:
            break
        
        try:
            soup = BeautifulSoup(html, 'lxml')
            # Only organic result containers; ads are filtered per-result below.
            # NOTE(review): selectors depend on Baidu's current markup — verify
            # periodically, as class names change without notice.
            for result in soup.select('div.result.c-container', limit=max_results):
                if is_advertisement(result):
                    continue
                
                title_tag = result.select_one('h3.t a')
                if not title_tag:
                    continue
                
                # Minimal per-result extraction: title, link, optional abstract.
                results.append({
                    'title': title_tag.get_text(strip=True)[:50],
                    'url': title_tag.get('href', ''),
                    'abstract': result.select_one('div.c-abstract').get_text(strip=True)[:100] if result.select_one('div.c-abstract') else ""
                })
                
                if len(results) >= max_results:
                    break
        except Exception as e:
            logger.error(f"搜索结果解析失败: {str(e)}")
            break
        
        page += 1
        # Randomized politeness delay before fetching the next result page.
        time.sleep(random.uniform(SEARCH_CONFIG['request_delay_min'], SEARCH_CONFIG['request_delay_max']))
    
    logger.info(f"百度搜索结束，获取{len(results)}条有效结果")
    return results

def professional_site_search(query, site_name, start_time):
    """Fetch one configured professional site and return structured content.

    The Beijing public-resource trading center gets bespoke parsing (up to
    3 tender notices); every other configured site falls back to generic
    main-content extraction. Note that *query* is not used anywhere in this
    function — the site's landing page is fetched as-is.

    Args:
        query: Search keywords (currently unused; kept for the tool contract).
        site_name: Key into PROFESSIONAL_SITES.
        start_time: time.time() stamp for the shared timeout budget.

    Returns:
        Dict with 'success': bool, plus either site results/content or an
        'error' message.
    """
    if site_name not in PROFESSIONAL_SITES:
        return {"success": False, "error": f"未配置专业网站：{site_name}"}
    
    site_url = PROFESSIONAL_SITES[site_name]
    logger.info(f"访问专业网站：{site_name}（{site_url}）")
    
    # Global timeout check before doing any network work.
    if time.time() - start_time > SEARCH_CONFIG['max_execution_time']:
        return {"success": False, "error": "操作超时"}
    
    # Fetch the site's landing page (shorter per-request timeout than default).
    html = fetch_url(site_url, timeout=6)
    if not html:
        return {"success": False, "error": f"无法访问网站：{site_url}"}
    
    # Special-cased handling for the Beijing public-resource trading center.
    if site_name == '北京公共资源交易中心':
        try:
            soup = BeautifulSoup(html, 'lxml')
            tender_notices = []
            # Only the first 3 tender notices.
            # NOTE(review): 'div.notice-item' depends on the site's current
            # markup — verify the selector still matches.
            for notice in soup.select('div.notice-item', limit=3):
                try:
                    title = notice.select_one('a').get_text(strip=True)[:50]
                    link = urljoin(site_url, notice.select_one('a')['href'])
                    tender_notices.append({"title": title, "url": link})
                except:
                    continue
            
            return {
                "success": True,
                "site": site_name,
                "url": site_url,
                "results": tender_notices,
                "hint": "已提取3条最新招标信息，点击链接查看详情"
            }
        except Exception as e:
            return {"success": False, "error": f"解析招标信息失败：{str(e)}"}
    
    # Generic handling: extract and truncate the page's main content.
    main_content = extract_main_content(html)[:1500]
    return {
        "success": True,
        "site": site_name,
        "url": site_url,
        "content": main_content,
        "execution_time": f"{time.time()-start_time:.1f}秒"
    }

@mcp.tool()
# NOTE(review): `professional_site` is really Optional[str]; annotation left
# as-is because FastMCP derives the tool schema from it — confirm before changing.
def web_search(query: str, professional_site: str = None) -> dict:
    """
    Web search tool.

    Args:
        query: Search keywords (required, e.g. "北京六建最新消息").
        professional_site: Optional name of a configured professional site
            (e.g. "北京公共资源交易中心"); when given, that site is queried
            directly instead of running a Baidu search.

    Returns:
        Structured search results (success flag, result count, content
        summaries, execution time).
    """
    start_time = time.time()
    try:
        # A professional site, when requested, takes priority over Baidu.
        if professional_site:
            result = professional_site_search(query, professional_site, start_time)
            result['execution_time'] = f"{time.time()-start_time:.1f}秒"
            return result
        
        # Generic Baidu search.
        search_results = baidu_search(query, start_time)
        # Fetch full-page details for the first 3 results only.
        detailed_results = []
        for res in search_results[:3]:
            if time.time() - start_time > SEARCH_CONFIG['max_execution_time']:
                logger.warning("详情提取超时，停止后续请求")
                break
            
            # fetch_url may return None; extract_main_content maps that to "".
            content = extract_main_content(fetch_url(res['url'], timeout=5))
            detailed_results.append({
                **res,
                'content': content
            })
            time.sleep(random.uniform(0.2, 0.3))
        
        return {
            "success": True,
            "query": query,
            "result_count": len(detailed_results),
            "results": detailed_results,
            "execution_time": f"{time.time()-start_time:.1f}秒",
            "hint": "结果已精简，优先展示核心信息"
        }
    
    except Exception as e:
        # Top-level boundary: log with traceback and return a structured error.
        error_msg = f"搜索失败：{str(e)}"
        logger.error(error_msg, exc_info=True)
        return {
            "success": False,
            "error": error_msg,
            "execution_time": f"{time.time()-start_time:.1f}秒"
        }

if __name__ == "__main__":
    # Start the MCP service over stdio (blocks until the client disconnects).
    logger.info("=== WebSearcher MCP服务启动 ===")
    logger.info(f"当前配置：{SEARCH_CONFIG}")
    try:
        mcp.run(transport="stdio")
    except Exception as e:
        # Surface startup failures in the log, then re-raise for the caller.
        logger.critical(f"MCP服务启动失败：{str(e)}", exc_info=True)
        raise