import logging

import scrapy
from scrapy_092_58tc.items import Scrapy09258BookItem

'''
Create a project:   scrapy startproject <project_name>
Create a spider:    run `scrapy genspider <spider_name> <domain>`
                    inside the spiders directory
Run this spider:    scrapy crawl dangdang
'''


class DangdangSpider(scrapy.Spider):
    """Scrape book name, price and cover-image URL from a dangdang.com
    category listing, then follow pagination up to a small page limit.

    Yields:
        Scrapy09258BookItem(name=..., price=..., src_url=...)
    """

    name = "dangdang"
    allowed_domains = ["category.dangdang.com"]
    start_urls = ["https://category.dangdang.com/cp01.21.02.03.00.00.html"]
    # https to match start_urls and avoid an http->https redirect per page
    base_url = 'https://category.dangdang.com/pg'
    # current page number; bumped before each follow-up request
    page = 1

    def parse(self, response):
        """Extract every book on the listing page, then request the next page.

        :param response: scrapy Response for one category listing page
        """
        # Each book is one <li> under the component_59 <ul>
        li_list = response.xpath("//ul[@id='component_59']/li")
        for li in li_list:
            name = li.xpath("./a/@title").extract_first()
            # Text between tags requires /text()
            price = li.xpath("./p[@class='price']/span[@class='search_now_price']/text()").extract_first()
            # Images are lazy-loaded: most <img> tags keep the real URL in
            # @data-original, but the first one only has @src.  Without the
            # fallback the original code produced the broken URL 'https:None'.
            src = li.xpath("./a/img/@data-original").extract_first()
            if not src:
                src = li.xpath("./a/img/@src").extract_first()
            # Page URLs are protocol-relative, e.g. //img3m9.ddimg.cn/...
            src2 = 'https:' + str(src)
            book = Scrapy09258BookItem(name=name, price=price, src_url=src2)
            yield book
        # Pagination: page 2+ use URLs of the form
        #   https://category.dangdang.com/pg2-cp01.21.02.03.00.00.html
        if self.page < 3:
            print('执行第:' + str(self.page) + '页')
            logging.warning('执行第:' + str(self.page) + '页')
            self.page = self.page + 1
            url = self.base_url + str(self.page) + '-cp01.21.02.03.00.00.html'
            # scrapy.Request issues a GET; callback=self.parse re-enters this
            # method for the next listing page (the scheduler handles the
            # recursion, so there is no Python-level stack growth).
            yield scrapy.Request(url=url, callback=self.parse)
