import requests
from bs4 import BeautifulSoup
import urllib.parse
import time
import random
import re

class SearchEngine:
    """Web search client that scrapes Baidu or Google result pages.

    Each result is a dict with keys ``title``, ``url``, ``abstract`` and
    ``content`` (the extracted text of the linked page, truncated).

    NOTE(review): this scrapes live search-engine HTML; the CSS selectors
    below match historical page markup and will need updating whenever the
    engines change their layout. Google in particular usually blocks
    unauthenticated scraping — the ``google`` backend is best-effort only.
    """

    # Selectors tried in order when looking for a page's main content area;
    # the first selector that matches anything wins.
    _CONTENT_SELECTORS = (
        'article',
        '[role="main"]',
        'main',
        '.content',
        '.post-content',
        '.article-content',
        '.entry-content',
        'p',
    )

    def __init__(self, engine_type="baidu", timeout=10, max_content_length=2000):
        """Create a search client.

        Args:
            engine_type: Backend to use, ``"baidu"`` or ``"google"``.
            timeout: Per-HTTP-request timeout in seconds. The default (10)
                matches the old hard-coded value used for content fetches;
                previously the search requests themselves had NO timeout and
                could hang indefinitely — that defect is fixed here.
            max_content_length: Truncation limit (in characters) for extracted
                page text. Default preserves the old hard-coded 2000.
        """
        self.engine_type = engine_type
        self.timeout = timeout
        self.max_content_length = max_content_length
        # Desktop Chrome UA — without it the engines serve degraded/blocked pages.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

    def search(self, query, num_results=6):
        """Run a search and return a list of result dicts.

        Args:
            query: Search query string (will be URL-encoded).
            num_results: Maximum number of results to return.

        Returns:
            List of dicts with keys title/url/abstract/content.

        Raises:
            ValueError: If ``engine_type`` is not a supported backend.
            Exception: If the underlying search request fails.
        """
        if self.engine_type == "baidu":
            return self._search_baidu(query, num_results)
        if self.engine_type == "google":
            return self._search_google(query, num_results)
        raise ValueError("不支持的搜索引擎类型")

    def _extract_content_from_url(self, url):
        """Fetch ``url`` and return its cleaned main text, truncated.

        Best-effort: any failure (network error, HTTP error status, parse
        error) is absorbed and reported as a sentinel string rather than an
        exception, so one bad link never aborts a whole search.
        """
        try:
            response = requests.get(url, headers=self.headers, timeout=self.timeout)
            # Fix: treat HTTP error statuses (404/500/...) as extraction
            # failures instead of scraping the error page as content.
            response.raise_for_status()
            # apparent_encoding sniffs the real charset — many Chinese pages
            # mis-declare or omit their encoding header.
            response.encoding = response.apparent_encoding

            soup = BeautifulSoup(response.text, 'html.parser')

            # Drop script/style elements so their text doesn't pollute output.
            for tag in soup(["script", "style"]):
                tag.decompose()

            # Try the main-content selectors in priority order.
            content = ""
            for selector in self._CONTENT_SELECTORS:
                elements = soup.select(selector)
                if elements:
                    content = ' '.join(elem.get_text(strip=False) for elem in elements)
                    break

            # Fall back to the whole <body> when no content region matched.
            if not content:
                body = soup.find('body')
                if body:
                    content = body.get_text(strip=False)

            # Clean up whitespace while keeping paragraph structure:
            # collapse runs of newlines and runs of spaces to single ones.
            content = re.sub(r'\n+', '\n', content)
            content = re.sub(r' +', ' ', content)
            content = content.strip()

            # Truncate long pages and mark the cut with an ellipsis.
            if len(content) > self.max_content_length:
                content = content[:self.max_content_length] + '...'

            return content if content else "无法提取内容"
        except Exception as e:
            return f"内容提取失败: {str(e)}"

    def _build_result(self, title, url, abstract):
        """Assemble one result dict, fetching page content when a URL exists."""
        content = self._extract_content_from_url(url) if url else "无链接"
        return {
            'title': title,
            'url': url,
            'abstract': abstract,
            'content': content
        }

    def _search_baidu(self, query, num_results):
        """Scrape Baidu search results for ``query``.

        Raises:
            Exception: Wrapping any request/parse failure of the search page
                itself (individual bad result items are skipped silently).
        """
        results = []
        search_url = f"https://www.baidu.com/s?wd={urllib.parse.quote(query)}&rn={num_results}"

        try:
            # Fix: timeout added — previously this request could hang forever.
            response = requests.get(search_url, headers=self.headers, timeout=self.timeout)
            response.encoding = 'utf-8'
            soup = BeautifulSoup(response.text, 'html.parser')

            # Baidu wraps each organic result in <div class="result">.
            result_items = soup.find_all('div', class_='result')

            for item in result_items[:num_results]:
                try:
                    title_tag = item.find('h3', class_='t') or item.find('h3')
                    if not title_tag:
                        continue
                    title = title_tag.get_text(strip=True)
                    link_tag = title_tag.find('a')
                    url = link_tag['href'] if link_tag and link_tag.get('href') else ''

                    # Abstract lives in c-abstract (or older class names);
                    # fall back to the first <p> in the item.
                    abstract_tag = item.find('div', class_='c-abstract') or item.find('div', class_='abstract')
                    if not abstract_tag:
                        abstract_tag = item.find('p')
                    abstract = abstract_tag.get_text(strip=True) if abstract_tag else '无摘要'

                    results.append(self._build_result(title, url, abstract))
                except Exception:
                    # Best-effort: a malformed item must not abort the search.
                    continue

        except Exception as e:
            raise Exception(f"百度搜索出错: {str(e)}")

        return results

    def _search_google(self, query, num_results):
        """Scrape Google search results for ``query``.

        NOTE(review): Google typically requires an official API (or special
        handling) — plain scraping like this is frequently blocked.

        Raises:
            Exception: Wrapping any request/parse failure of the search page
                itself (individual bad result items are skipped silently).
        """
        results = []
        search_url = f"https://www.google.com/search?q={urllib.parse.quote(query)}&num={num_results}"

        try:
            # Fix: timeout added — previously this request could hang forever.
            response = requests.get(search_url, headers=self.headers, timeout=self.timeout)
            response.encoding = 'utf-8'
            soup = BeautifulSoup(response.text, 'html.parser')

            # Google wraps each organic result in <div class="g">.
            result_items = soup.find_all('div', class_='g')

            for item in result_items[:num_results]:
                try:
                    title_tag = item.find('h3')
                    if not title_tag:
                        continue
                    title = title_tag.get_text(strip=True)
                    link_tag = item.find('a')
                    url = link_tag['href'] if link_tag and link_tag.get('href') else ''

                    # Snippet class names rotate frequently; try the known ones.
                    abstract_tag = item.find('span', class_='aCOpRe') or item.find('div', class_='VwiC3b')
                    if not abstract_tag:
                        abstract_tag = item.find('span') or item.find('div', class_='s3v9rd')
                    abstract = abstract_tag.get_text(strip=True) if abstract_tag else '无摘要'

                    results.append(self._build_result(title, url, abstract))
                except Exception:
                    # Best-effort: a malformed item must not abort the search.
                    continue

        except Exception as e:
            raise Exception(f"谷歌搜索出错: {str(e)}")

        return results