import os
import json
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import trafilatura
import logging
import re

class WebSearcher:
    """Google Custom Search client that can also fetch and extract page text.

    Uses the Google Custom Search JSON API for search and trafilatura for
    main-content extraction from result pages.
    """

    # Legacy defaults kept only for backward compatibility.
    # SECURITY NOTE(review): a hard-coded API key committed to source is a
    # leaked credential — rotate this key and supply credentials via the
    # constructor or the GOOGLE_API_KEY / GOOGLE_CSE_ID environment variables.
    _DEFAULT_API_KEY = 'AIzaSyAr5OkcqVi1AfaF9_veGsGLoPtpPSXwgn8'
    _DEFAULT_CX = 'a12d7b1ae2d2f4dae'

    def __init__(self, timeout=10, api_key=None, cx=None):
        """
        :param timeout: per-request timeout in seconds (applied to all HTTP calls)
        :param api_key: Google API key; falls back to the GOOGLE_API_KEY env var,
                        then the legacy hard-coded default
        :param cx: Custom Search Engine id; falls back to the GOOGLE_CSE_ID env
                   var, then the legacy hard-coded default
        """
        self.timeout = timeout
        self.api_key = api_key or os.environ.get('GOOGLE_API_KEY', self._DEFAULT_API_KEY)
        self.cx = cx or os.environ.get('GOOGLE_CSE_ID', self._DEFAULT_CX)
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

    def extract_main_content(self, url):
        """
        Extract the main textual content of a web page using trafilatura.

        :param url: page URL to fetch
        :return: whitespace-normalized extracted text capped at 100,000 chars,
                 or a (Chinese) status/error message string on failure
        """
        try:
            # Fetch the page with a browser-like User-Agent.
            response = requests.get(url, headers=self.headers, timeout=self.timeout)
            response.raise_for_status()

            # Extract the main content, skipping comments and tables.
            extracted_content = trafilatura.extract(
                response.content.decode('utf-8', errors='replace'),  # decode as UTF-8
                include_comments=False,
                include_tables=False,
                no_fallback=True
            )

            # BUG FIX: trafilatura.extract() returns None when it cannot
            # extract anything; the old code passed None to re.sub, raised
            # TypeError, and fell into the except branch instead of returning
            # the "no content" message below.
            if not extracted_content:
                return "无法提取有效内容"

            # Collapse whitespace runs and cap the length to avoid huge payloads.
            extracted_content = re.sub(r'\s+', ' ', extracted_content).strip()
            return extracted_content[:100000].strip()

        except Exception as e:
            # Use the logging module (already imported) instead of print.
            logging.warning("内容提取错误 %s: %s", url, e)
            return f"内容提取失败: {str(e)}"

    def search_web(self, query, start=1, num_results=5, get_content=True):
        """
        Search the web via the Google Custom Search JSON API.

        :param query: search query string
        :param start: 1-based index of the first result to return
        :param num_results: number of results to request
        :param get_content: whether to also fetch and extract each page's content
        :return: on success:
                {
                    'status': 'success',
                    'query': query,
                    'results': [{'title': '', 'link': '', 'snippet': '', 'content': ''}]
                }
                on failure: {'status': 'error', 'message': '...'}
        """
        try:
            # BUG FIX: pass the query via `params` so requests URL-encodes it;
            # the old f-string URL broke for queries containing '&', '#',
            # spaces, or non-ASCII characters. Also add a timeout and surface
            # HTTP errors explicitly instead of parsing an error payload.
            response = requests.get(
                "https://www.googleapis.com/customsearch/v1",
                params={
                    'key': self.api_key,
                    'q': query,
                    'cx': self.cx,
                    'start': start,
                    'num': num_results,
                },
                timeout=self.timeout,
            )
            response.raise_for_status()
            search_data = response.json()

            results = []
            # BUG FIX: .get('items') is None when the API returns no hits;
            # iterating None raised TypeError. Default to an empty list.
            for index, item in enumerate(search_data.get('items', []), start=1):
                # BUG FIX: `content` was declared once outside the loop, so an
                # item without a link inherited the previous item's content.
                content = ''
                link = item.get('link')
                if get_content and link:
                    content = self.extract_main_content(link)

                results.append({
                    'title': item.get('title', ''),
                    'link': item.get('link', ''),
                    'snippet': re.sub(r'\s+', ' ', item.get('snippet', '')).strip(),
                    'content': content
                })
                print(f"{index} 搜索结果：标题={item.get('title', '')}，\n 链接={item.get('link', '')}，\n 摘要={item.get('snippet')}，\n 内容={content}\n\n")

            return {
                'status': 'success',
                'query': query,
                'results': results
            }
        except Exception as e:
            # Return a structured error instead of raising, so callers always
            # receive a dict with a 'status' key.
            return {
                'status': 'error',
                'message': str(e)
            }
