import scrapy
from dangdang_spider.items import DangdangSpiderItem


# from scrapy.pipelines.images import ImagesPipeline
# from scrapy.exceptions import DropItem
# from scrapy.http import Request

class DangdangSpider(scrapy.Spider):
    """Crawl the Dangdang weekly (recent7) bestseller ranking, pages 1-25.

    Yields one :class:`DangdangSpiderItem` per book with fields:
    rank, name, author, pub_time, publisher, price.
    """

    name = "dangdang"
    allowed_domains = ["bang.dangdang.com"]
    # One ranking page per URL; the weekly list spans pages 1-25.
    start_urls = [
        f"http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-recent7-0-0-1-{page}"
        for page in range(1, 26)
    ]
    # BUG FIX: Scrapy setting names are case-sensitive upper-case constants.
    # The previous keys "user-agent"/"referer" are not recognized settings and
    # were silently ignored, so requests went out with Scrapy's default UA and
    # no Referer. USER_AGENT and DEFAULT_REQUEST_HEADERS are the supported
    # per-spider way to set these.
    custom_settings = {
        "USER_AGENT": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
        "DEFAULT_REQUEST_HEADERS": {
            "Referer": "http://bang.dangdang.com/",
        },
        "DOWNLOAD_DELAY": 1,
    }

    def parse(self, response, **kwargs):
        """Parse one ranking page; yield an item per ``.bang_list li`` row.

        Rows that are missing mandatory fields (rank/name) or contain
        malformed data are logged and skipped rather than aborting the page.
        """
        books = response.css('.bang_list li')
        for book in books:
            item = DangdangSpiderItem()
            try:
                # Book rank: rendered like "1." — strip the trailing dot.
                item['rank'] = book.css('.list_num::text').get().strip().replace(".", "")
                # Book title.
                item['name'] = book.css('.name a::text').get().strip()
                # First <a> in publisher_info is the (first) author.
                item['author'] = book.css('.publisher_info a::text').get(default="未知作者")
                # Publication date lives in the <span>.
                item['pub_time'] = book.css('.publisher_info span::text').get(default="未知时间")
                # The publisher is the last link; with only one link it is an
                # author, so fall back to the placeholder.
                publishers = book.css('.publisher_info a::text').getall()
                item['publisher'] = publishers[-1] if len(publishers) >= 2 else "未知出版社"
                # Current price, e.g. "¥59.00" → 59.0; absent price → 0.0.
                price_text = book.css('.price .price_n::text').get()
                item['price'] = float(price_text.replace("¥", "")) if price_text else 0.0
            except (AttributeError, ValueError, IndexError) as e:
                # AttributeError: .get() returned None for rank/name;
                # ValueError: price text not a number. Skip the row only.
                self.logger.error(f"解析图书失败:{str(e)}")
                continue
            yield item
