import requests
from bs4 import BeautifulSoup
import time

def crawl_website(url, delay=1, timeout=10):
    """
    Fetch a web page and parse it with BeautifulSoup.

    Args:
        url (str): URL to fetch.
        delay (int): Seconds to sleep after a successful fetch, to
            throttle back-to-back requests.
        timeout (int): Seconds to wait for the server before giving up.
            Without a timeout, requests.get can block indefinitely on an
            unresponsive host.

    Returns:
        BeautifulSoup | None: Parsed page content, or None on any
        request or parsing error (errors are printed, not raised).
    """
    try:
        # Browser-like User-Agent so simple bot filters don't reject us.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        # Issue the HTTP request; timeout prevents an indefinite hang.
        response = requests.get(url, headers=headers, timeout=timeout)
        response.raise_for_status()  # raise on 4xx/5xx status codes

        # Parse the response body.
        soup = BeautifulSoup(response.text, 'html.parser')

        # Throttle so consecutive calls don't hammer the server.
        time.sleep(delay)

        return soup

    except requests.RequestException as e:
        print(f"请求错误: {e}")
        return None
    except Exception as e:
        print(f"解析错误: {e}")
        return None

def extract_data(soup, tag, class_name=None):
    """
    Collect the text of every matching element from a parsed page.

    Args:
        soup (BeautifulSoup): Parsed page content; a falsy value
            (e.g. None from a failed crawl) yields an empty list.
        tag (str): HTML tag name to search for.
        class_name (str, optional): Restrict matches to this CSS class.

    Returns:
        list: Whitespace-stripped text of each matching element, or an
        empty list on error or falsy input (errors are printed).
    """
    if not soup:
        return []

    try:
        # Only constrain by class when one was actually supplied.
        search_kwargs = {'class_': class_name} if class_name else {}
        matches = soup.find_all(tag, **search_kwargs)
        return [node.get_text(strip=True) for node in matches]
    except Exception as e:
        print(f"数据提取错误: {e}")
        return []

# Usage example
if __name__ == "__main__":
    # Sample target: a Bing image-search result page.
    url = "https://cn.bing.com/images/search?q=%e7%94%b5%e8%84%91%e5%a3%81%e7%ba%b8%e9%ab%98%e6%b8%85%e5%85%a8%e5%b1%8f4k&id=F62D07F49C861106AB73873A4AE4E7CEF23C275F&form=IACFIR&first=1&disoverlay=1"

    # Fetch and parse the page (None on failure).
    soup = crawl_website(url)

    if soup:
        # Dump every <h1> heading.
        print("页面标题:")
        for text in extract_data(soup, 'h1'):
            print(f"  {text}")

        # Show a 100-character preview of the first three paragraphs.
        print("\n段落内容:")
        for text in extract_data(soup, 'p')[:3]:
            print(f"  {text[:100]}...")