# -*- coding: utf-8 -*-
import scrapy
from copy import deepcopy

class JdSpider(scrapy.Spider):
    """Crawl JD's book category tree and yield one item per listed book.

    Flow: ``parse`` reads the category index page and schedules one request
    per sub-category; ``parse_em`` scrapes each sub-category listing page
    (name + author per book) and follows pagination.
    """
    name = 'jd'
    allowed_domains = ['jd.com']
    start_urls = ['https://book.jd.com/booksort.html']

    def parse(self, response):
        """Extract top-level/sub-category names and follow sub-category links."""
        dt_list = response.xpath('//div[@class="mc"]/dl/dt')  # top-level categories
        # NOTE(review): [:1] limits crawling to the first category — looks like a
        # debug leftover, but preserved since widening the crawl changes load.
        for dt in dt_list[:1]:
            item = {}
            item['book_type'] = dt.xpath('./a/text()').extract_first()
            em_list = dt.xpath('./following-sibling::*[1]/em')  # sub-categories
            for em in em_list[:1]:
                item['book_type_small'] = em.xpath('./a/text()').extract_first()
                href = em.xpath('./a/@href').extract_first()
                if not href:
                    # no link for this sub-category; 'https:' + None would raise
                    continue
                item['book_href_small'] = 'https:' + href
                yield scrapy.Request(
                    item['book_href_small'],
                    callback=self.parse_em,
                    # deepcopy: requests are handled concurrently, so each
                    # callback must get its own mutable dict
                    meta={'item': deepcopy(item)},
                )

    def parse_em(self, response):
        """Scrape every book on a sub-category listing page; follow pagination."""
        item = response.meta['item']
        li_list = response.xpath('//ul[@class="gl-warp clearfix"]/li')
        for li in li_list:
            # extract_first() returns None on no match — guard before strip()
            name = li.xpath('.//div[@class="p-name"]/a/em/text()').extract_first()
            item['book_name'] = name.strip() if name else None
            item['book_author'] = li.xpath(
                './/div[@class="p-bookdetails"]//span[@class="author_type_1"]/a/@title'
            ).extract_first()
            # yield a copy so each record is independent of the shared dict
            yield deepcopy(item)

        # "下一页" = "next page" link; follow it with the same callback
        next_url = response.xpath('//a[text()="下一页"]/@href').extract_first()
        if next_url:
            yield response.follow(
                next_url,
                callback=self.parse_em,
                meta={'item': item},
            )