import requests
from bs4 import BeautifulSoup
import time
import random
import csv


def crawl_douban_top250():
    """Crawl the Douban Books Top 250 list and save it to a CSV file.

    Fetches all 10 pages (25 books each), extracts each book's title and
    rating, writes the rows to 'douban_top250.csv' (with a UTF-8 BOM so
    Excel opens it correctly), and returns the number of books collected.

    Returns:
        int: number of books successfully scraped and written.
    """
    # Browser-like request headers to reduce the chance of being blocked
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
        'Referer': 'https://book.douban.com/'
    }

    # Accumulated [title, rating] rows
    books = []

    # Paginated crawl: 10 pages, offset advances by 25 per page
    for page in range(0, 250, 25):
        url = f"https://book.douban.com/top250?start={page}"

        try:
            # timeout prevents the crawler from hanging forever on a
            # stalled connection (the original request had none)
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()  # raise on HTTP error status codes
            soup = BeautifulSoup(response.text, 'lxml')

            # Parse each book row. Guard every lookup so one malformed
            # entry is skipped instead of aborting the whole page.
            items = soup.find_all('tr', class_='item')
            for item in items:
                title_div = item.find('div', class_='pl2')
                rating_span = item.find('span', class_='rating_nums')
                if title_div is None or title_div.a is None or rating_span is None:
                    continue  # incomplete entry — skip, keep the rest of the page
                title = title_div.a.get('title', '').strip()
                rating = rating_span.text.strip()
                books.append([title, rating])

            print(f"已抓取第{page // 25 + 1}页数据")
            # Random 2-5 second pause between pages to be polite to the server
            time.sleep(random.uniform(2, 5))

        except requests.RequestException as e:
            # Network/HTTP failure for this page only — log and move on
            print(f"第{page // 25 + 1}页抓取失败：{str(e)}")
            continue

    # utf-8-sig writes a BOM so Excel detects the encoding correctly
    with open('douban_top250.csv', 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        writer.writerow(['书名', '评分'])
        writer.writerows(books)

    return len(books)


# Run the crawler only when executed as a script, not when imported as a module
if __name__ == "__main__":
    print(f"共抓取到{crawl_douban_top250()}条数据")