import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
import csv
import os

# Location of the ChromeDriver binary (Windows install layout).
CHROME_DRIVER_PATH = r"D:\download\chromedriver-win64\chromedriver-win64\chromedriver.exe"

# Build the Chrome launch options: headless, hardened for CI-like
# environments, and with image loading disabled to speed up scraping.
_options = Options()
for _flag in (
    '--headless',                            # no visible browser window
    '--disable-gpu',
    '--no-sandbox',
    '--disable-dev-shm-usage',
    '--blink-settings=imagesEnabled=false',  # skip images for faster loads
):
    _options.add_argument(_flag)

# Start the browser session shared by the rest of the script.
driver = webdriver.Chrome(service=Service(CHROME_DRIVER_PATH), options=_options)


# Prepare the output CSV with a header row. Opening in 'w' mode already
# truncates any existing file, so the previous explicit
# os.path.exists / os.remove step was redundant and has been dropped.
csv_file = '../dangdang_books_zhuangji.csv'

# utf-8-sig writes a BOM so spreadsheet apps (Excel) detect the encoding.
with open(csv_file, 'w', newline='', encoding='utf-8-sig') as f:
    writer = csv.writer(f)
    writer.writerow(['编号', '书名', '价格', '作者', '出版社', '评论数', '图片URL'])

# Sequential 1-based record number assigned to each scraped book.
book_id = 1

# Scrape up to 100 category pages, writing one CSV row per book.
# Fixes over the original:
#   * the output file is opened ONCE in append mode for the whole run,
#     instead of being reopened for every single book;
#   * driver.quit() is guaranteed via try/finally, so the headless
#     Chrome process is not leaked if scraping aborts mid-run.
try:
    with open(csv_file, 'a', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)

        for page in range(1, 101):
            url = f'http://category.dangdang.com/pg{page}-cp01.38.00.00.00.00.html'
            print(f'正在爬取第 {page} 页: {url}')

            driver.get(url)
            time.sleep(2)  # crude fixed wait for the page to render

            # Each book on the page is an <li> under the "bigimg" list.
            book_items = driver.find_elements(By.XPATH, '//ul[@class="bigimg"]/li')
            print(f'找到 {len(book_items)} 本图书')

            for item in book_items:
                try:
                    # Title lives in the anchor's title attribute (full text).
                    title = item.find_element(By.XPATH, './/a[@name="itemlist-title"]').get_attribute('title')

                    # Current price with the currency symbol stripped.
                    price = item.find_element(By.XPATH, './/span[@class="search_now_price"]').text.replace('¥', '')

                    # Author line is '/'-separated; first part is the author,
                    # last part is the publisher (when more than one part).
                    author_pub = item.find_element(By.XPATH, './/p[@class="search_book_author"]').text
                    author_pub_parts = author_pub.split('/')
                    author = author_pub_parts[0].strip() if len(author_pub_parts) > 0 else ''
                    publisher = author_pub_parts[-1].strip() if len(author_pub_parts) > 1 else ''

                    # Review count; books with no reviews lack the element.
                    try:
                        comment_count = item.find_element(By.XPATH, './/a[@name="itemlist-review"]').text.replace('条评论', '')
                    except NoSuchElementException:
                        comment_count = '0'

                    # Lazily-loaded images keep the real URL in data-original;
                    # fall back to src when that attribute is absent/empty.
                    img_url = item.find_element(By.XPATH, './/img').get_attribute('data-original')
                    if not img_url:
                        img_url = item.find_element(By.XPATH, './/img').get_attribute('src')

                    writer.writerow([book_id, title, price, author, publisher, comment_count, img_url])
                    book_id += 1

                except Exception as e:
                    # Skip malformed entries but keep scraping the rest.
                    print(f'提取数据时出错: {e}')
                    continue
finally:
    # Always release the browser, even if scraping aborts mid-run.
    driver.quit()

print(f'爬取完成，共爬取 {book_id - 1} 本书，数据已保存到', csv_file)