import requests  # 导入requests库，用于网络请求
from bs4 import BeautifulSoup  # 导入BeautifulSoup用于解析HTML
import pandas as pd  # 导入pandas用于数据保存

# Product listing URL (JD search used as an example; replace with the
# target e-commerce site when adapting this script).
url = 'https://search.jd.com/Search?keyword=手机'

# Desktop-browser User-Agent so the site serves the regular HTML page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
}


def _parse_items(html):
    """Extract product name/price records from a search-result page.

    Args:
        html: Raw HTML text of the listing page.

    Returns:
        A list of dicts keyed by the report's Chinese column headers
        ('商品名称', '价格'). The CSS selectors match JD's markup and
        must be adjusted when targeting a different site.
    """
    soup = BeautifulSoup(html, 'html.parser')
    items = []
    for li in soup.select('.gl-item'):
        title = li.select_one('.p-name em')
        price = li.select_one('.p-price i')
        # Skip placeholder/ad tiles that lack either field.
        if title and price:
            items.append({'商品名称': title.text.strip(), '价格': price.text.strip()})
    return items


try:
    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()
    # Without a charset in the response headers, requests defaults to
    # ISO-8859-1, which garbles Chinese text; use the detected encoding.
    # NOTE(review): confirm against the live site's actual charset.
    response.encoding = response.apparent_encoding
    items = _parse_items(response.text)
    # An empty `items` usually means the selectors no longer match the
    # page (or the request was blocked); the report is written anyway so
    # the failure is visible in the output file.
    df = pd.DataFrame(items)
    df.to_excel('jd_phones.xlsx', index=False)
    print('商品价格报表已生成：jd_phones.xlsx')
except Exception as e:  # top-level script boundary: report and exit cleanly
    print(f'爬取失败：{e}')