# coding:utf-8
from scrapy import Request
from scrapy.spiders import Spider
from ..items import BookItem

class booskSpider(Spider):
    """Spider that crawls books.toscrape.com and yields one BookItem per book.

    Extracts title, price, and absolute image URL from each catalogue page,
    then follows the "next" pagination link until the last page.
    """
    name = 'book'  # spider identifier used by `scrapy crawl`; must stay 'book'

    def start_requests(self):
        """Issue the initial request for page 1 of the catalogue."""
        url = 'https://books.toscrape.com/catalogue/page-1.html'  # sandbox site built for scraping practice
        # yield (not return) so Scrapy's engine can schedule the request lazily
        yield Request(url)

    def parse(self, response, **kwargs):
        """Parse one catalogue page: yield a BookItem per book, then paginate.

        :param response: the downloaded catalogue page
        :yields: BookItem instances, then a Request for the next page (if any)
        """
        lis = response.xpath('//ol[@class ="row"]/li')
        for oneLi in lis:
            # Book title (the <a> text is truncated; @title holds the full name)
            bookname = oneLi.xpath('article/h3/a/@title').get()
            # Price string, e.g. "£51.77"
            price = oneLi.xpath('article/div[@class = "product_price"]/p[1]/text()').get()
            # Image src is relative ("../media/..."); urljoin resolves it to an
            # absolute URL without the double-slash the old string-splitting produced.
            img_url = oneLi.xpath('article/div[@class = "image_container"]/a/img/@src').get()
            img_url = response.urljoin(img_url)

            # BUGFIX: a fresh item per book. Reusing one BookItem across the loop
            # mutates already-yielded items once pipelines process them asynchronously.
            item = BookItem()
            item["name"] = bookname
            item["price"] = price
            item["img_url"] = img_url
            yield item

        # Follow pagination: the "next" link href is relative (e.g. "page-2.html"),
        # so resolve it against the current response URL.
        next_url = response.xpath('//li[@class = "next"]/a/@href').get()
        if next_url:
            yield Request(response.urljoin(next_url))
