# -*- coding: utf-8 -*-
import random

import scrapy

from doubanScrapy.items import DoubanscrapyItem
class RandomUserAgent(object):
    """Downloader middleware that sets a random User-Agent on each request.

    The pool of User-Agent strings is read from the project's
    ``USER_AGENTS`` setting via :meth:`from_crawler`.
    """

    def __init__(self, agents):
        # agents: list of User-Agent strings to choose from.
        self.agents = agents

    @classmethod
    def from_crawler(cls, crawler):
        # Load the candidate User-Agent strings from the USER_AGENTS setting.
        return cls(crawler.settings.getlist('USER_AGENTS'))

    def process_request(self, request, spider):
        # Pick one agent at random for every outgoing request; setdefault
        # keeps a User-Agent that was already set explicitly on the request.
        # NOTE: requires `import random` at the top of the file (it was
        # missing, which made this line raise NameError at runtime).
        request.headers.setdefault('User-Agent', random.choice(self.agents))
class BookscrapySpider(scrapy.Spider):
    """Crawl the first five pages of Douban's "programming" book tag.

    Yields one :class:`DoubanscrapyItem` per book, then follows the
    next-page link until the page starting at offset 120 (5 pages of 24).
    """

    name = 'bookScrapy'
    allowed_domains = ['douban.com']
    start_urls = ['https://book.douban.com/tag/编程']

    def parse(self, response):
        """Extract every book on one list page, then request the next page."""
        books = response.xpath('//li[@class="subject-item"]/div[@class="info"]')
        for book in books:
            item = DoubanscrapyItem()
            item['title'] = book.xpath(
                "normalize-space( h2[@class='']/a/text()) ").extract_first()
            # The "pub" line packs author / publisher / date / price into one
            # slash-separated string; extract and split it ONCE instead of
            # running the same xpath four times per book.
            pub = book.xpath(
                "normalize-space( div[@class='pub']/text()  )").extract_first()
            parts = pub.split('/') if pub else []
            # Guard every index so a short or missing pub line yields None
            # fields instead of crashing the whole page parse.
            item['author'] = parts[1] if len(parts) > 1 else None
            item['publisher'] = parts[-3] if len(parts) >= 3 else None
            item['publishTime'] = parts[-2] if len(parts) >= 2 else None
            item['price'] = parts[-1] if parts else None
            item['rate'] = book.xpath(
                "div[@class='star clearfix']/span[@class='rating_nums']/text()"
            ).extract_first()
            item['commentsNum'] = book.xpath(
                "normalize-space(div[@class='star clearfix']/span[@class='pl']/text())"
            ).extract_first()
            yield item

        next_url = response.xpath("//span[@class='next']/link/@href").extract_first()
        # Stop after page 5 (offset 120) -- and also when there is no next
        # page at all: the original called nextUrl.find(...) on None and
        # crashed with AttributeError on the last page of the listing.
        if next_url is None or 'start=120' in next_url:
            return
        # Recurse into the next listing page.
        yield scrapy.Request("https://book.douban.com" + next_url,
                             callback=self.parse, dont_filter=True)