import requests
from bs4 import BeautifulSoup
import pandas as pd
import time

# Request headers: present a desktop-browser User-Agent so the site serves the normal page.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'}


def scrape_page(page):
    """Fetch one page of the Dangdang bestseller ranking and return it as a DataFrame.

    Parameters
    ----------
    page : int
        1-based page number of the ranking list.

    Returns
    -------
    pandas.DataFrame
        One row per book: ranking, title, publish date, publisher,
        list price, sale price, comment count, and detail-page URL.

    Raises
    ------
    requests.HTTPError
        If the server answers with a 4xx/5xx status code.
    requests.Timeout
        If the server does not respond within the timeout.
    """
    url = f'https://bang.dangdang.com/books/bestsellers/01.41.00.00.00.00-recent30-0-0-1-{page}'
    # timeout prevents the script from hanging forever on a stalled connection
    response = requests.get(url=url, headers=headers, timeout=10)
    # Fail loudly on an HTTP error instead of silently parsing an error page
    response.raise_for_status()
    # The site serves GBK-encoded pages; requests' charset guess is unreliable here
    response.encoding = 'gbk'
    soup = BeautifulSoup(response.text, 'lxml')
    # Extract the parallel per-book columns from the ranking list markup.
    # NOTE(review): these selectors assume every book entry carries all fields;
    # a missing field would desynchronize the columns — verify against live pages.
    rankings = [int(i.get_text().strip().strip('.')) for i in soup.select('div.list_num')]
    titles = [i.get('title').strip() for i in soup.select('div.name > a')]
    pub_dates = [i.get_text().strip() for i in soup.select('div.publisher_info > span')]
    publishers = [i.get_text().strip() for i in soup.select('div.publisher_info > span + a')]
    prices = [float(i.get_text().strip().strip('¥')) for i in soup.select('div.price > p:nth-child(1) > span.price_r')]
    discounts = [float(i.get_text().strip().strip('¥')) for i in soup.select('div.price > p:nth-child(1) > span.price_n')]
    comments = [int(i.get_text().strip().replace('条评论', '')) for i in soup.select('div.star > a')]
    details = [i.get('href') for i in soup.select('div.name > a')]
    data = {'排名': rankings, '书名': titles, '出版时间': pub_dates, '出版社': publishers, '定价': prices, '售价': discounts, '评论数': comments, '详情页': details}
    return pd.DataFrame(data)


def main():
    """Scrape pages 1-3 of the bestseller list and export the result to CSV."""
    page_start = 1
    page_end = 3
    data_list = []
    for page in range(page_start, page_end + 1):
        data_list.append(scrape_page(page))
        # Pause between requests to avoid triggering anti-scraping measures;
        # no need to sleep after the final page.
        if page < page_end:
            time.sleep(3)
    df = pd.concat(data_list, ignore_index=True)
    # utf-8-sig writes a BOM so Excel opens the Chinese column headers correctly
    df.to_csv('bestselling_books.csv', index=False, encoding='utf-8-sig')


if __name__ == '__main__':
    main()
