import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import time

# API credentials for Google Custom Search, read from the environment only.
# SECURITY: an earlier revision hard-coded a live API key and CSE id as
# fallbacks here — never commit secrets; rotate any key that was exposed.
google_search_key = os.getenv("GOOGLE_SEARCH_API_KEY")
cse_id = os.getenv("CSE_ID")
search_term = "印巴最新消息"  # query: latest India–Pakistan news

# Request headers mimicking a desktop browser so news sites serve full pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

# Google Custom Search JSON API endpoint and query parameters.
url = "https://www.googleapis.com/customsearch/v1"
params = {
    'q': search_term,
    'key': google_search_key,
    'cx': cse_id,
    'num': 5  # cap the number of results
}

def extract_main_content(page_url):
    """Fetch *page_url* and return its main textual content.

    Tries, in order: the semantic <article> element, a <div> carrying a
    common article-body class, and finally the whole <body>.  On any
    failure, returns an error-message string (in Chinese, matching the
    script's output language) instead of raising.
    """
    try:
        response = requests.get(page_url, headers=headers, timeout=10)
        response.raise_for_status()

        # requests falls back to ISO-8859-1 when the server sends no charset,
        # which garbles GBK/UTF-8 Chinese pages — trust content sniffing then.
        if not response.encoding or response.encoding.lower() == 'iso-8859-1':
            response.encoding = response.apparent_encoding

        soup = BeautifulSoup(response.text, 'html.parser')

        # Remove non-content elements so get_text() doesn't emit JS/CSS noise.
        for tag in soup(['script', 'style', 'noscript']):
            tag.decompose()

        # 1. Prefer the semantic <article> tag.
        article = soup.find('article')
        if article:
            return article.get_text(strip=True, separator='\n')

        # 2. Fall back to a div with a common article-body class
        #    (a list for class_ matches a div having ANY of these classes).
        main_content = soup.find('div', class_=['content', 'article-body', 'post-content'])
        if main_content:
            return main_content.get_text(strip=True, separator='\n')

        # 3. Last resort: the entire <body>.
        return soup.body.get_text(strip=True, separator='\n') if soup.body else "无法提取正文内容"

    except Exception as e:
        # Best-effort scraper: report the failure as text rather than
        # aborting the caller's loop over search results.
        return f"提取内容时出错: {str(e)}"

try:
    # Issue the Custom Search request (timeout added: the original search
    # call had none and could hang forever, unlike the page-scrape call).
    print(f"正在搜索: {search_term}...")
    response = requests.get(url, params=params, headers=headers, timeout=10)
    response.raise_for_status()

    # Parse the JSON response body.
    data = response.json()

    # 'items' is absent when the search returned no results.
    if 'items' in data:
        print(f"\n找到 {len(data['items'])} 条结果:")
        for i, item in enumerate(data['items'], start=1):
            print(f"\n{'='*50}")
            print(f"结果 {i}:")
            print(f"标题: {item.get('title', '无标题信息')}")
            print(f"链接: {item.get('link', '无链接信息')}")
            print(f"摘要: {item.get('snippet', '无摘要信息')}")

            # Fetch and display the page's main content.
            page_url = item.get('link')
            if page_url:
                print("\n正在提取正文内容...")
                domain = urlparse(page_url).netloc
                print(f"来源网站: {domain}")

                content = extract_main_content(page_url)
                print("\n主要内容:")
                print(content[:1000] + "..." if len(content) > 1000 else content)  # truncate long output

                # Polite delay between page fetches to avoid hammering sites.
                time.sleep(2)

    else:
        print("没有找到相关搜索结果。")

except requests.exceptions.RequestException as e:
    print(f"请求发生错误: {e}")
except ValueError as e:
    print(f"解析JSON响应时出错: {e}")
except Exception as e:
    print(f"发生未知错误: {e}")