import scrapy
from scrapy.selector import Selector
from scrapy.spiders import CrawlSpider
from scrapy.http import Request
from ..items import RemenItem

class RemenspiderSpider(scrapy.Spider):
    """Spider that scrapes the hot recommended-collections listing on jianshu.com.

    Yields one RemenItem per collection card with fields: name, content,
    article (count text), fans (count text).
    """
    name = 'remenspider'
    #allowed_domains = ['www.jianshu.com']
    # Entry point: page 1 of the hot-ordered collections listing.
    start_urls = ['https://www.jianshu.com/recommendations/collections?page=1&order_by=hot']

    def parse(self, response):
        """Parse one listing page: yield an item per collection, then queue pages 2-4.

        :param response: the downloaded listing page.
        :yields: RemenItem instances and follow-up Requests.
        """
        selector = Selector(response)
        # One <div class="col-xs-8"> per collection card on the listing page.
        infos = selector.xpath('//div[@class="col-xs-8"]')
        for info in infos:
            try:
                # Keep the try body minimal: only the extractions can raise
                # IndexError (when a card is missing an expected node).
                name = info.xpath('div/a[1]/h4/text()').extract()[0]
                content = info.xpath('div/a[1]/p/text()').extract()[0]
                article = info.xpath('div/div/a/text()').extract()[0]
                fans = info.xpath('div/div/text()').extract()[0]
            except IndexError:
                # Card missing a field: skip it rather than yield partial data.
                continue
            # BUGFIX: create a fresh item per card. The original reused one
            # RemenItem created before the loop, so later iterations mutated
            # items already yielded to the pipeline (all items ended up with
            # the last card's values).
            item = RemenItem()
            item['name'] = name
            item['content'] = content
            item['article'] = article
            item['fans'] = fans
            yield item

        # Queue the remaining listing pages (2-4). Scrapy's duplicate filter
        # drops the repeats generated when those pages re-yield these URLs.
        urls = ['https://www.jianshu.com/recommendations/collections?page={}&order_by=hot'.format(i)
                for i in range(2, 5)]
        for url in urls:
            yield Request(url, callback=self.parse)
