import scrapy


class XiaoShuoSpider(scrapy.Spider):
  """Crawl novel chapters from www.81zw.us, following each page's
  next-chapter link so the whole book is scraped in sequence."""

  name = 'xiaoshuo'
  # BUG FIX: allowed_domains must hold bare domains that match start_urls.
  # The old value ('www.qidian.com/rank/yuepiao') included a URL path and
  # pointed at a different site than start_urls, so OffsiteMiddleware would
  # have filtered every follow-up request.
  allowed_domains = ['www.81zw.us']
  start_urls = ['http://www.81zw.us/book/606/.html']

  # How to crawl pages continuously: yield the item, then yield a Request
  # for the next page with this same method as the callback.
  def parse(self, response):
    """Extract one chapter's title and body text, then follow the
    next-chapter link (if any) back into this callback.

    Yields:
      dict: {'title': ..., 'content': ...} for the current page.
      scrapy.Request: for the next chapter, when a next link exists.
    """
    title = response.xpath('//h1/text()').extract_first()
    # BUG FIX: content was hard-coded to ''. Join all text nodes of the
    # chapter body instead.
    # NOTE(review): selector assumes the body lives in div#content —
    # confirm against the live page markup.
    content = ''.join(
        response.xpath('//div[@id="content"]//text()').extract()).strip()
    yield {'title': title, 'content': content}
    # BUG FIX: response.xpath() was called with no selector argument,
    # which raises TypeError at runtime.
    # NOTE(review): assumes the site labels the link 下一章 ("next
    # chapter") — verify against the page.
    next_url = response.xpath(
        '//a[contains(text(), "下一章")]/@href').extract_first()
    if next_url:
      # urljoin resolves a relative href against the current page's URL.
      yield scrapy.Request(response.urljoin(next_url), callback=self.parse)
