import requests
import time
import random
from bs4 import BeautifulSoup
from openpyxl import Workbook
# Douban Books Top 250 list pages (high-rated books), 10 pages total;
# `start` is the 0-based offset of the first book on each page.
base_url = "https://book.douban.com/top250?start={}"
# Pool of desktop/mobile User-Agent strings; one is chosen at random per
# request so the traffic looks less like a single automated client.
headers_list = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Safari/605.1.15",
    "Mozilla/5.0 (Linux; Android 12; MI 11) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Mobile Safari/537.36"
]
# NOTE(review): never read or written anywhere in this file — dead state.
all_books = []
def get_book_details(book_url):
    """Fetch a Douban book detail page and extract publication metadata.

    Args:
        book_url: Absolute URL of the book's detail page.

    Returns:
        dict with keys "作者", "出版社", "出版时间", "页数", "价格", "ISBN".
        Missing fields are "未知"; on any request/parsing failure every
        value is "获取失败".
    """
    headers = {"User-Agent": random.choice(headers_list)}
    try:
        # Randomized delay to stay under Douban's anti-scraping limits.
        time.sleep(random.uniform(1.5, 3))
        response = requests.get(book_url, headers=headers, timeout=10)
        response.raise_for_status()
        # Force UTF-8 — requests may otherwise mis-detect the charset.
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, "html.parser")
        info_tag = soup.find("div", id="info")
        info_text = info_tag.get_text(strip=True) if info_tag else ""
        return _parse_book_info(info_text)
    except Exception as e:
        # Best-effort scraper: log and return placeholder values so one
        # bad page does not abort the whole crawl.
        print(f"获取书籍详情失败（{book_url}）：{e}")
        return {
            "作者": "获取失败",
            "出版社": "获取失败",
            "出版时间": "获取失败",
            "页数": "获取失败",
            "价格": "获取失败",
            "ISBN": "获取失败",
        }


def _parse_book_info(info_text):
    """Extract labelled fields from the flattened text of the #info div.

    Fields appear in a fixed order on Douban detail pages, so each value
    is the text between its own label and the next expected label (or to
    the end of the string for the last field). A field whose label is
    absent falls back to "未知".
    """
    def _between(start, end):
        # Text after `start` up to `end`; if `end` is absent (or None),
        # everything after `start` — matching the original split logic.
        if start not in info_text:
            return "未知"
        tail = info_text.split(start)[1]
        return tail.split(end)[0].strip() if end else tail.strip()

    return {
        "作者": _between("作者:", "出版社:"),
        "出版社": _between("出版社:", "出版年:"),
        "出版时间": _between("出版年:", "页数:"),
        "页数": _between("页数:", "定价:"),
        "价格": _between("定价:", "装帧:"),
        "ISBN": _between("ISBN:", None),
    }
def crawl_top_books():
    """Crawl the Douban Books Top 250 list and save the data to Excel.

    For each of the 10 list pages (25 books per page) this extracts the
    title, rating, comment count and detail-page URL, fetches the detail
    page for author/publisher/etc. via get_book_details, and appends one
    row per book to 豆瓣高评分书籍信息.xlsx. A failed page is logged and
    skipped rather than aborting the crawl.
    """
    wb = Workbook()
    ws = wb.active
    # Excel header row.
    ws.append(["书名", "作者", "出版社", "出版时间", "页数", "价格", "ISBN", "评分", "评论数量"])

    # BUGFIX: was range(100) — Top 250 is only 10 pages of 25 books, so
    # pages 11..100 were 90 pointless (and bannable) extra requests.
    for page in range(10):
        start = page * 25
        url = base_url.format(start)
        headers = {
            "User-Agent": random.choice(headers_list),
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
        }

        try:
            print(f"正在爬取第{page + 1}页...")
            # Longer delay between list pages than between detail pages.
            time.sleep(random.uniform(2, 4))
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            # Force UTF-8, consistent with get_book_details, to avoid
            # garbled Chinese text if charset detection guesses wrong.
            response.encoding = "utf-8"
            soup = BeautifulSoup(response.text, "html.parser")

            # Each book on the list page sits in its own <table> inside
            # the div.indent container.
            book_list = soup.find("div", class_="indent").find_all("table")
            for book in book_list:
                # Title and detail-page link.
                title_tag = book.find("div", class_="pl2").find("a")
                book_name = title_tag.get_text(strip=True).replace("\n", "").replace(" ", "")
                book_url = title_tag["href"]

                # Rating and comment count, with placeholders when absent.
                rating_tag = book.find("span", class_="rating_nums")
                rating = rating_tag.get_text(strip=True) if rating_tag else "无评分"
                comment_tag = book.find("span", class_="pl")
                comment_count = comment_tag.get_text(strip=True) if comment_tag else "0条评论"

                # Detail-page info (author, publisher, ...).
                detail_info = get_book_details(book_url)

                ws.append([
                    book_name,
                    detail_info["作者"],
                    detail_info["出版社"],
                    detail_info["出版时间"],
                    detail_info["页数"],
                    detail_info["价格"],
                    detail_info["ISBN"],
                    rating,
                    comment_count
                ])
                print(f"已爬取：《{book_name}》")

        except Exception as e:
            # Skip this page but keep crawling the rest.
            print(f"爬取第{page + 1}页失败：{e}")
            continue

    wb.save("豆瓣高评分书籍信息.xlsx")
    print("爬取完成，数据已保存至《豆瓣高评分书籍信息.xlsx》")
if __name__ == "__main__":
    # Remind the user to install the third-party dependencies first.
    print("运行前请确保已安装依赖：pip install requests beautifulsoup4 openpyxl")
    crawl_top_books()



