from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any

import scrapy
from scrapy.cmdline import execute
from twisted.internet.threads import deferToThread

from dangdang.items import DangdangItem


class DangSpider(scrapy.Spider):
    """Crawl Dangdang's book category cp01.01.02.00.00.00 listing pages.

    Starts at page 1 and follows pagination up to page 100, yielding one
    DangdangItem per book found on each listing page.
    """

    name = "dang"
    allowed_domains = ["category.dangdang.com"]
    start_urls = ["https://category.dangdang.com/pg1-cp01.01.02.00.00.00.html"]

    # Listing URLs have the shape: <base_url><page><suffix_url>
    base_url = "https://category.dangdang.com/pg"
    suffix_url = "-cp01.01.02.00.00.00.html"
    page = 1  # current page counter, advanced in parse()

    def parse(self, response):
        """Extract every book entry on this page, then request the next page.

        Yields DangdangItem instances followed by (at most) one
        scrapy.Request for the next listing page.
        """
        li_list = response.xpath('//ul[@id="component_59"]/li')

        for li in li_list:
            yield self.handler_item(li)

        # Pagination: follow pages 2..100, one request per parsed page.
        if self.page < 100:
            self.page += 1
            url = self.base_url + str(self.page) + self.suffix_url
            yield scrapy.Request(url=url, callback=self.parse)

    def handler_item(self, li):
        """Build a DangdangItem from a single <li> listing entry.

        Lazy-loaded images carry their real URL in @data-original; entries
        rendered eagerly only have @src, hence the fallback. The `or` also
        covers a present-but-empty @data-original attribute.
        """
        img = li.xpath('.//img/@data-original').get() or li.xpath('.//img/@src').get()
        name = li.xpath('./p[@class="name"]/a/@title').get()
        price = li.xpath('./p[@class="price"]/span[1]/text()').get()
        # Missing author/intro are normalized to placeholder text.
        author = li.xpath('./p[@class="search_book_author"]/span[1]/a[1]/@title').get() or "未知作者"
        intro = li.xpath('./p[@class="detail"]/text()').get() or "无简介"
        publisher = li.xpath('./p[@class="search_book_author"]/span[3]/a[1]/@title').get()
        return DangdangItem(name=name, img=img, price=price, author=author, intro=intro, publisher=publisher)



if __name__ == '__main__':
    # Run the spider in-process, equivalent to `scrapy crawl dang` on the CLI.
    execute('scrapy crawl dang'.split())

# Pagination URL pattern:
# Page 1  https://category.dangdang.com/pg1-cp01.01.02.00.00.00.html
# Page 2  https://category.dangdang.com/pg2-cp01.01.02.00.00.00.html
# Page 3  https://category.dangdang.com/pg3-cp01.01.02.00.00.00.html
