import requests
from bs4 import BeautifulSoup
import time

# Target page: Douban "new books" listing.
url = "https://book.douban.com/latest"  # latest-books page
headers = {
    # Browser-like User-Agent header sent with the request.
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36 Edg/136.0.0.0"
}


def crawl_douban_books():
    """Fetch Douban's latest-books page and print each book's details.

    Prints title, author, detail-page link, and publication info for every
    ``<li class="cover">`` item found. Network/HTTP errors are reported and
    the function returns; individual items missing expected tags are
    skipped instead of aborting the whole run.
    """
    try:
        # timeout= prevents the request from hanging indefinitely on a
        # stalled connection.
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        response.encoding = "utf-8"
    except requests.RequestException as e:
        print(f"爬取失败: {e}")
        return

    soup = BeautifulSoup(response.text, "html.parser")

    for item in soup.find_all("li", class_="cover"):
        book = _extract_book(item)
        if book is None:
            # Item lacks the expected <a title/href>; skip rather than crash.
            continue
        title, link, author, pub_info = book
        print(f"书名: {title}")
        print(f"作者: {author}")
        print(f"详情页: {link}")
        print(f"出版信息: {pub_info}\n")


def _extract_book(item):
    """Extract (title, link, author, pub_info) from one ``li.cover`` element.

    Returns ``None`` when the anchor tag or its title/href attributes are
    missing. Author and publication info come from the sibling
    ``div.info`` (page-structure dependent — verify against the live page)
    and default to "" when absent.
    """
    anchor = item.find("a")
    if anchor is None or not anchor.has_attr("title") or not anchor.has_attr("href"):
        return None
    title = anchor["title"]
    link = anchor["href"]

    author = ""
    pub_info = ""
    info_div = item.find_next_sibling("div", class_="info")
    if info_div is not None:
        author_tag = info_div.find("span", class_="author")
        if author_tag is not None:
            author = author_tag.text.strip()
        pub_tag = info_div.find("span", class_="pub")  # publisher / pub date text
        if pub_tag is not None:
            pub_info = pub_tag.text.strip()
    return title, link, author, pub_info


if __name__ == "__main__":
    # Run the scraper only when executed as a script, not on import.
    crawl_douban_books()
