import scrapy
from middler_spider.items import MiddlerSpiderItem
from copy import deepcopy
from scrapy_redis.spiders import RedisSpider


# Inherits RedisSpider (originally inherited scrapy.Spider) for distributed crawling.
class ShoppingSpiderSpider(RedisSpider):
    """Distributed Dangdang book-category spider.

    Inherits ``RedisSpider`` (instead of ``scrapy.Spider``) so start URLs
    are pulled from the Redis list named by ``redis_key`` rather than from
    a hard-coded ``start_urls`` attribute.
    """

    name = "shopping_spider"
    allowed_domains = ["dangdang.com"]
    # start_urls is disabled for scrapy-redis; push the URL below onto the
    # Redis key "book" (lpush book <url>) to kick off the crawl.
    # start_urls = ["https://category.dangdang.com/?ref=www-0-C"]
    redis_key = "book"

    def parse(self, response, **kwargs):
        """Parse the category index page.

        Extracts the three-level category titles and follows each leaf
        category's link to its listing page via ``parse_detail``.
        """
        data_list = response.xpath('//div[@class="classify_con"]//div//div[@class="classify_books"]')
        for data_one in data_list:
            item = MiddlerSpiderItem()

            # Level-1 category title.
            item["title_one"] = data_one.xpath('./div[@class="classify_books_detail"]/h3/a/text()').extract_first()
            for data_two in data_one.xpath('./div[@class="classify_kind"]'):
                # Level-2 category title.
                item["title_two"] = data_two.xpath('./div/a/text()').extract_first()
                for data_three in data_two.xpath('./ul//li'):
                    # Level-3 (leaf) category title and its listing-page link.
                    item["title_three"] = data_three.xpath('./a/text()').extract_first()
                    raw_href = data_three.xpath('./a/@href').extract_first()
                    # Guard: some <li> entries carry no href; skip them instead
                    # of raising TypeError on the string concatenation.
                    if not raw_href:
                        continue
                    href = "http:" + raw_href
                    # Only follow real listing pages (other links are ads/anchors).
                    if href.endswith(".html"):
                        # deepcopy: the same item object is mutated on every
                        # iteration, so each request needs its own snapshot.
                        yield scrapy.Request(
                            url=href,
                            callback=self.parse_detail,
                            meta={"item": deepcopy(item)},
                        )

    # Parse a book-listing page: one item per book <li>.
    def parse_detail(self, response):
        """Extract name, price and detail text for each book on the page.

        The partially-filled item (category titles) arrives via
        ``response.meta`` from ``parse``.
        """
        item = response.meta["item"]
        for data in response.xpath('//ul[@id="component_59"]//li'):
            item["name"] = data.xpath('./p[@class="name"]/a/@title').extract_first()
            item["price"] = data.xpath('./p[@class="price"]/span[1]/text()').extract_first()
            # BUG FIX: the original assigned this to item["name"], clobbering
            # the book title extracted just above. Store it under "detail"
            # instead (NOTE(review): ensure MiddlerSpiderItem declares a
            # "detail" field).
            item["detail"] = data.xpath('./p[@class="detail"]/text()').extract_first()
            # Yield a snapshot (the same item is reused each iteration) so the
            # data actually reaches the item pipelines; the original only
            # printed it, so nothing was ever exported.
            yield deepcopy(item)

