import scrapy
from dscrapy.items import DscrapyItem
class LeetcodeSpider(scrapy.Spider):
    """Spider that scrapes book listings from a dangdang.com category page.

    Run with:  scrapy crawl leetcode   (add --nolog to suppress log output)

    NOTE(review): the spider name "leetcode" does not match the site it
    actually crawls (dangdang.com). Renaming would change the crawl
    command callers use, so it is left unchanged here.
    """

    name = "leetcode"
    # Restrict the crawl to this domain.
    allowed_domains = ["category.dangdang.com"]
    # Starting category page to scrape.
    start_urls = ["https://category.dangdang.com/cp01.05.07.00.00.00.html"]

    def parse(self, response):
        """Parse one category page.

        Yields one populated ``DscrapyItem`` per book on the page, then
        follows the "next page" link (if any) back into ``parse``.
        """
        # Each <li> under the big-image list is one book entry.
        book_nodes = response.xpath('//ul[@class="bigimg"]/li')
        for book in book_nodes:
            item = DscrapyItem()
            # extract_first() returns None instead of raising IndexError
            # when a node is missing, so one malformed entry cannot abort
            # the whole page. (Attribute values are addressed with @.)
            item["title"] = book.xpath('./p[1]/a/@title').extract_first()
            item["price"] = book.xpath(
                './p[3]/span[@class="search_now_price"]/text()'
            ).extract_first()
            # Some books have no author link; fall back to an empty string
            # (single query instead of the original query-twice pattern).
            item["author"] = book.xpath(
                './p[5]/span[1]/a[1]/text()'
            ).extract_first(default="")
            item["comments"] = book.xpath('./p[4]/a/text()').extract_first()
            yield item

        # Pagination: follow the relative "next" href until there is none.
        next_page = response.xpath(
            '//div/ul/li[@class="next"]/a/@href'
        ).extract_first()
        if next_page is not None:
            yield response.follow(next_page, self.parse)
