import requests
import csv
import time
from bs4 import BeautifulSoup
from urllib.parse import quote

# .venv\Scripts\activate.ps1  # activate the virtual environment (Windows PowerShell)
# pip install requests beautifulsoup4

# Browser-like request headers so JD's server treats us as a normal browser
# instead of rejecting the request as an obvious bot.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
    "Connection": "keep-alive",
}


def get_book_info(page):
    """Scrape a single JD search-results page of books.

    Args:
        page: JD's ``page`` query parameter. JD uses odd values 1, 3, 5, ...;
            the human-visible page number is ``page // 2 + 1``.

    Returns:
        A list of dicts with keys 书名/价格/作者/出版社/出版日期/评论数.
        Returns an empty list when the request or parsing fails (best-effort:
        one bad page must not abort the whole crawl).
    """

    def _text(tag, default=""):
        # Safe text extraction: ad/placeholder items may lack any field,
        # and calling .get_text() on None would otherwise raise and lose
        # the entire page inside the except below.
        return tag.get_text(strip=True) if tag is not None else default

    try:
        # Search keyword "技术图书" (technical books), URL-encoded.
        keyword = quote("技术图书")
        # s = result offset; JD shows ~30 items per odd-numbered "page".
        url = f"https://search.jd.com/Search?keyword={keyword}&wq={keyword}&page={page}&s={(page-1)*30+1}&click=0"

        # timeout prevents a stalled connection from hanging the crawl
        # forever; raise_for_status surfaces HTTP errors instead of
        # silently parsing an error page.
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        response.encoding = "utf-8"

        soup = BeautifulSoup(response.text, "html.parser")
        book_list = soup.select(".gl-item")  # one element per listed book

        books = []
        for item in book_list:
            title = _text(item.select_one(".p-name em")).replace("\n", "")
            price = _text(item.select_one(".p-price i"))
            author = _text(item.select_one(".p-bookdetails a"), "未知")

            # Query the detail spans once instead of up to four times.
            details = item.select(".p-bookdetails span")
            publisher = (
                details[-1].get_text(strip=True) if len(details) > 1 else "未知"
            )
            publish_date = (
                details[-2].get_text(strip=True) if len(details) > 2 else "未知"
            )
            comment_count = _text(item.select_one(".p-commit a"), "0")

            books.append(
                {
                    "书名": title,
                    "价格": price,
                    "作者": author,
                    "出版社": publisher,
                    "出版日期": publish_date,
                    "评论数": comment_count,
                }
            )

        print(f"第{page//2+1}页爬取完成，获取{len(books)}本图书信息")
        return books

    except Exception as e:
        # Broad on purpose: log the failure and keep crawling other pages.
        print(f"爬取第{page//2+1}页失败：{str(e)}")
        return []


def save_to_csv(books, filename="jd_tech_books.csv"):
    """Write scraped book records to a CSV file.

    Args:
        books: list of dicts keyed by the Chinese field names below; an
            empty list is a no-op (a message is printed, no file written).
        filename: output path, default ``jd_tech_books.csv``.
    """
    if not books:
        print("没有数据可保存")
        return

    # Column order of the CSV header.
    fieldnames = ["书名", "价格", "作者", "出版社", "出版日期", "评论数"]

    # utf-8-sig writes a BOM so Excel auto-detects the encoding;
    # newline="" lets the csv module control line endings.
    with open(filename, "w", newline="", encoding="utf-8-sig") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(books)

    # Fix: the summary previously printed the literal "(unknown)" instead
    # of the actual output filename.
    print(f"所有数据已保存到 {filename}，共{len(books)}本图书")


def main(total_pages=5):
    """Crawl *total_pages* search-result pages and save everything to CSV.

    JD's pagination is unusual: the ``page`` query parameter takes the odd
    values 1, 3, 5, ..., so visible page ``i`` (0-based) maps to ``2*i + 1``.
    """
    collected = []

    for index in range(total_pages):
        jd_page = index * 2 + 1  # odd-valued JD page parameter
        collected.extend(get_book_info(jd_page))

        # Throttle between requests to avoid anti-scraping measures;
        # no need to sleep after the final page.
        if index != total_pages - 1:
            time.sleep(2)

    save_to_csv(collected)


if __name__ == "__main__":
    # Scrape the first 5 pages (adjust total_pages as needed).
    main(total_pages=5)
