import requests
from bs4 import BeautifulSoup
import json
import time
import random
import urllib.parse

def sogou_search(query, max_results=5):
    """Run a Sogou web search and return the results as a JSON string.

    Args:
        query: Search phrase (will be URL-encoded).
        max_results: Maximum number of result entries to return
            (default 5, matching the previous hard-coded behavior).

    Returns:
        A JSON-encoded string. On success:
        ``{"search_engine": "sogou", "results": [{"title", "url", "content"}, ...]}``;
        on any failure an ``{"error": "..."}`` object instead.
    """
    # URL-encode the query so non-ASCII (e.g. Chinese) terms are safe.
    encoded_query = urllib.parse.quote(query)
    # Base search URL; timestamp-style parameters are deliberately omitted
    # because they were observed to trigger anti-bot checks.
    search_url = f"https://www.sogou.com/web?query={encoded_query}"

    # Realistic browser headers to reduce the chance of being blocked.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Referer': 'https://www.sogou.com/',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-User': '?1',
    }

    try:
        # Single HTTP request; everything after this is pure HTML parsing.
        response = requests.get(search_url, headers=headers, timeout=15)
        # Let requests guess the charset — Sogou pages are not always UTF-8.
        response.encoding = response.apparent_encoding

        # Non-200 usually means throttling or a block page.
        if response.status_code != 200:
            return json.dumps({"error": f"请求失败，状态码: {response.status_code}"}, ensure_ascii=False)

        # Heuristic detection of Sogou's CAPTCHA / security-check page.
        if "请输入验证码" in response.text or "安全验证" in response.text:
            return json.dumps({"error": "触发了搜狗的反爬机制，请稍后再试或调整请求策略"}, ensure_ascii=False)

        soup = BeautifulSoup(response.text, 'html.parser')

        results = []
        # '.vrwrap' is Sogou's generic result-item container. Scan twice as
        # many items as requested so entries lacking a usable snippet can be
        # skipped (with the default max_results=5 this is the original limit
        # of 10).
        for item in soup.select('.vrwrap', limit=max_results * 2):
            try:
                title_elem = item.select_one('h3 a')
                if not title_elem:
                    continue

                title = title_elem.get_text(strip=True)
                href = title_elem.get('href', '')

                # Sogou wraps outbound links in a redirect endpoint;
                # recover the real destination from the 'url' query param.
                if href.startswith('/link?url='):
                    parsed = urllib.parse.urlparse(href)
                    real_url = urllib.parse.parse_qs(parsed.query).get('url', [None])[0]
                    if real_url:
                        href = urllib.parse.unquote(real_url)
                        # Some redirect targets omit the scheme.
                        if not href.startswith(('http://', 'https://')):
                            href = 'https://' + href

                # Snippet lives in one of two layout-dependent containers;
                # entries without any snippet are skipped entirely.
                content_elem = item.select_one('.str-info') or item.select_one('.text-layout')
                if content_elem is None:
                    continue

                # Cap snippet length to keep the JSON payload small.
                content = content_elem.get_text(strip=True)[:450]

                results.append({
                    "title": title,
                    "url": href,
                    "content": content
                })
                if len(results) >= max_results:
                    break
                # NOTE: the original code slept 1-2 s here "to avoid frequent
                # requests", but this loop only parses already-downloaded
                # HTML — no further requests are made — so the delay was pure
                # wasted time and has been removed.

            except Exception as e:
                # Best-effort per item: log and move on to the next result.
                print(f"处理搜索结果时出错: {e}")
                continue

        return json.dumps({"search_engine": "sogou", "results": results}, ensure_ascii=False, indent=2)

    except Exception as e:
        # Network errors, timeouts, parser failures, etc.
        return json.dumps({"error": f"搜索过程发生错误: {str(e)}"}, ensure_ascii=False)

# Example invocation: run one search and print the JSON result.
if __name__ == "__main__":
    query = "2025年第一季度GDP"
    print("正在搜索:", query)
    # sogou_search already returns pretty-printed JSON (indent=2),
    # so the result can be printed directly.
    print(sogou_search(query))