import requests
from bs4 import BeautifulSoup
 
# Fetch a web page and print its <title> text.
url = "https://www.python.org"
# timeout prevents the request from hanging forever; raise_for_status()
# surfaces HTTP errors (4xx/5xx) instead of silently parsing an error page.
response = requests.get(url, timeout=10)
response.raise_for_status()

# Parse the HTML
soup = BeautifulSoup(response.text, "lxml")

# Extract the title; guard against documents with no <title> element,
# where soup.title is None and .string would raise AttributeError.
title = soup.title.string if soup.title is not None else None
print("网页标题:", title)


from bs4 import BeautifulSoup
 
# Build a soup from an inline HTML snippet and pretty-print it.
html = "<html><head><title>Python 爬虫</title></head><body><h1>Hello, World!</h1></body></html>"
soup = BeautifulSoup(html, "lxml")

# prettify() re-emits the document with one tag per line and indentation.
print(soup.prettify())


import requests
from bs4 import BeautifulSoup

# Demonstrate the core BeautifulSoup lookup APIs on a small static page:
# attribute access, find / find_all, and CSS selectors.
html = """
<html>
    <head><title>Python 爬虫</title></head>
    <body>
        <h1>标题</h1>
        <p class="content">段落 1</p>
        <p class="content">段落 2</p>
        <a href="https://www.python.org" id="link">Python 官网</a>
    </body>
</html>
"""
soup = BeautifulSoup(html, "lxml")

# Page title                          -> Python 爬虫
print(soup.title.string)

# First <p> only                      -> 段落 1
print(soup.find("p").string)

# Every <p>, in document order        -> 段落 1, then 段落 2
for paragraph in soup.find_all("p"):
    print(paragraph.string)

# CSS selector lookup by id, then read the href attribute
link = soup.select_one("#link")
print(link["href"])  # -> https://www.python.org





import requests
from bs4 import BeautifulSoup

# Extract a simple HTML table: print each row's cell texts as a list.
html = """
<table>
    <tr><th>姓名</th><th>年龄</th></tr>
    <tr><td>张三</td><td>25</td></tr>
    <tr><td>李四</td><td>30</td></tr>
</table>
"""
soup = BeautifulSoup(html, "lxml")

# Walk the table row by row; each row may hold header (<th>) or data (<td>)
# cells, so match both tag names in one find_all call.
for row in soup.find_all("tr"):
    print([cell.string for cell in row.find_all(["th", "td"])])



import requests
from bs4 import BeautifulSoup
 
# Crawl a paginated listing (pages 1-5) and print each item's text.
base_url = "https://example.com/page="
for page in range(1, 6):  # assume there are 5 pages
    url = f"{base_url}{page}"
    # timeout avoids hanging indefinitely on a stalled connection;
    # raise_for_status() stops the crawl on HTTP errors (4xx/5xx)
    # instead of silently scraping an error page's HTML.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "lxml")

    # Extract this page's items (divs with class="item")
    items = soup.find_all("div", class_="item")
    for item in items:
        print(item.text.strip())