import requests
from bs4 import BeautifulSoup
import json
import time
import random
import urllib.parse
import threading 


# Baidu search
def baidu_search(query):
    """Search Baidu for *query* and scrape the top result pages.

    Collects up to 10 result links from the Baidu SERP, then visits them
    (with polite random delays) until 3 pages with non-empty paragraph text
    have been gathered.

    Returns a JSON string:
        {"search_engines": "baidu", "result": [{"url", "title", "content"}, ...]}
    or {"error": "..."} if the search request itself fails.
    """
    search_url = f"https://www.baidu.com/s?wd={query}"
    # Browser-like headers to reduce the chance of being served a bot page.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Referer': 'https://www.baidu.com/',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1'
    }

    try:
        # Send the search request; apparent_encoding guards against pages
        # whose declared charset is wrong.
        response = requests.get(search_url, headers=headers, timeout=20)
        response.encoding = response.apparent_encoding
        soup = BeautifulSoup(response.text, 'html.parser')

        # Extract up to 10 result links. Try several selectors because
        # Baidu's result markup varies between layouts.
        result_links = []
        selectors = [
            '.result.c-container',
            'div#content_left > div'
        ]
        for selector in selectors:
            for item in soup.select(selector, limit=10 - len(result_links)):
                link = item.select_one('h3.t a') or item.select_one('a.c-container')
                if link:
                    href = link.get('href')
                    if href:
                        result_links.append(href)
                        if len(result_links) >= 10:
                            break
            if len(result_links) >= 10:
                break

        time.sleep(random.uniform(1, 3))
        # Visit links and extract each page's title and body text.
        search_results = []
        for url in result_links:
            if len(search_results) >= 3:  # only keep the first 3 results
                break
            # Check the cap BEFORE sleeping so we don't waste a 1-3 s delay
            # on an iteration that would immediately break.
            time.sleep(random.uniform(1, 3))
            try:
                page_response = requests.get(url, headers=headers, timeout=20)
                page_response.encoding = page_response.apparent_encoding
                page_soup = BeautifulSoup(page_response.text, 'html.parser')

                # Page title (fallback when the page has no <title>).
                title = page_soup.title.string if page_soup.title else "无标题"

                # Crude content extraction: concatenate all paragraph text.
                paragraphs = page_soup.find_all('p')
                content = ' '.join([p.get_text(strip=True) for p in paragraphs])
                content = content[:450]  # keep only the first 450 characters

                if content:  # skip pages that yielded no paragraph text
                    search_results.append({
                        "url": url,
                        "title": title,
                        "content": content
                    })
            except Exception as e:
                # Record the failure as a result entry so the caller can see
                # which link broke. (The loop cap above already bounds the
                # list at 3, so no extra length guard is needed here.)
                search_results.append({
                    "url": url,
                    "title": "访问失败",
                    "content": f"访问该网页时出错: {str(e)}"[:80]
                })

        return json.dumps({"search_engines": "baidu", "result": search_results}, ensure_ascii=False)
    except Exception as e:
        return json.dumps({"error": f"搜索失败: {str(e)}"}, ensure_ascii=False)


# Sogou search
def sogou_search(query):
    """Search Sogou for *query* and scrape result snippets from the SERP.

    Unlike baidu_search, this only parses the search-result page itself
    (title, resolved URL, snippet) and does not fetch the linked pages.

    Returns a JSON string:
        {"search_engine": "sogou", "results": [{"title", "url", "content"}, ...]}
    or {"error": "..."} when the request fails, the status is non-200, or
    Sogou's anti-bot page is detected.
    """
    # URL-encode the query parameter.
    encoded_query = urllib.parse.quote(query)
    # Base search URL (no timestamp parameter - it caused problems).
    search_url = f"https://www.sogou.com/web?query={encoded_query}"

    # Realistic browser headers, including Sec-Fetch-*, to look less bot-like.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Referer': 'https://www.sogou.com/',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-User': '?1',
    }

    try:
        # Send the search request.
        response = requests.get(search_url, headers=headers, timeout=15)
        response.encoding = response.apparent_encoding

        # Bail out on a non-200 status (likely blocked).
        if response.status_code != 200:
            return json.dumps({"error": f"请求失败，状态码: {response.status_code}"}, ensure_ascii=False)

        # Detect Sogou's captcha / verification page.
        if "请输入验证码" in response.text or "安全验证" in response.text:
            return json.dumps({"error": "触发了搜狗的反爬机制，请稍后再试或调整请求策略"}, ensure_ascii=False)

        soup = BeautifulSoup(response.text, 'html.parser')

        # Extract result items; '.vrwrap' is the generic result container.
        results = []
        for item in soup.select('.vrwrap', limit=10):
            try:
                title_elem = item.select_one('h3 a')
                if not title_elem:
                    continue

                title = title_elem.get_text(strip=True)
                href = title_elem.get('href', '')

                # Sogou wraps result URLs in a /link?url=... redirect;
                # unwrap it to get the real destination.
                if href.startswith('/link?url='):
                    parsed = urllib.parse.urlparse(href)
                    real_url = urllib.parse.parse_qs(parsed.query).get('url', [None])[0]

                    if real_url:
                        # Decode and normalize the extracted URL.
                        href = urllib.parse.unquote(real_url)
                        if not href.startswith(('http://', 'https://')):
                            href = 'https://' + href

                # Extract the snippet; selectors vary across result types.
                content_elem = item.select_one('.str-info') or item.select_one('.text-layout')
                content = content_elem.get_text(strip=True) if content_elem else "无摘要"

                # Skip results without a snippet.
                if content == "无摘要":
                    continue

                # Limit snippet length.
                content = content[:450]

                results.append({
                    "title": title,
                    "url": href,
                    "content": content
                })
                if len(results) >= 3:
                    break
                # NOTE: the original code slept 1-2 s here, but this loop only
                # parses the already-downloaded page - no further requests are
                # made - so the delay was pure wasted time and was removed.

            except Exception as e:
                # Log and skip malformed result items.
                print(f"处理搜索结果时出错: {e}")
                continue

        return json.dumps({"search_engine": "sogou", "results": results}, ensure_ascii=False, indent=2)

    except Exception as e:
        return json.dumps({"error": f"搜索过程发生错误: {str(e)}"}, ensure_ascii=False)


# Previous sequential implementation, superseded by the threaded web_search below:
# def web_search(query):
#     baidu_result = baidu_search(query)
#     sougou_result = sogou_search(query)
#     return [baidu_result, sougou_result]

def web_search(query):
    """Run the Baidu and Sogou searches concurrently for *query*.

    Both engines are queried on their own thread so the total latency is
    roughly the slower of the two rather than their sum.

    Returns a two-element list: [baidu_json_string, sogou_json_string].
    """
    # Thread-shared slot for each engine's result, keyed by engine name.
    outcome = {"baidu": None, "sogou": None}

    def _collect(slot, search_fn):
        # Wrapper that stores the search result into its named slot.
        outcome[slot] = search_fn(query)

    workers = [
        threading.Thread(target=_collect, args=("baidu", baidu_search)),
        threading.Thread(target=_collect, args=("sogou", sogou_search)),
    ]

    # Launch both searches, then block until both have finished.
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    # Merged result, Baidu first to match the original ordering.
    return [outcome["baidu"], outcome["sogou"]]


# Perform an online search (standalone Sogou-based variant)
def search_online(query):
    """Search Sogou for *query* and return the full text of the result pages.

    Scrapes up to 10 result links (anchor class 'vr-title') from the SERP,
    fetches each page, and returns one big string with each page's URL and
    extracted text separated by blank lines. Returns an error string if the
    search request itself fails; individual page failures are recorded
    inline in the output.
    """
    search_url = f"https://www.sogou.com/web?query={query}"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
    }
    print('关键词：'+ query)
    try:
        # Send the search request. FIX: the original call had no timeout and
        # could hang indefinitely; the per-page fetches below already use one.
        response = requests.get(search_url, headers=headers, timeout=15)
        response.encoding = response.apparent_encoding  # handle mis-declared charsets
        soup = BeautifulSoup(response.text, 'html.parser')

        # Collect up to 10 result links from the SERP. The limit=10 argument
        # already bounds the iteration, so no manual length check is needed.
        page_urls = []
        for result in soup.find_all('a', class_='vr-title', limit=10):
            link = result.get('href')
            if link:
                page_urls.append(link)
                print(link)

        # Visit each page and extract its text content.
        page_contents = []
        for url in page_urls:
            try:
                page_response = requests.get(url, headers=headers, timeout=10)
                page_response.encoding = page_response.apparent_encoding
                page_soup = BeautifulSoup(page_response.text, 'html.parser')
                # Whole-document text dump, one node per line.
                page_text = page_soup.get_text(separator='\n')
                page_contents.append(f"网页链接: {url}\n网页内容:\n{page_text}")
            except Exception as e:
                # Record the failure inline rather than aborting the batch.
                page_contents.append(f"访问网页 {url} 失败: {str(e)}")

        return '\n\n'.join(page_contents)
    except Exception as e:
        return f"搜索失败: {str(e)}"
