# -*- coding: utf-8 -*-
import scrapy

class NovelSpider(scrapy.Spider):
    """Crawl a serialized novel from hengyan.com, chapter by chapter.

    Starts at a fixed chapter URL and follows each page's "next" link until
    no further link exists, yielding one item dict per chapter block.
    """

    name = 'novel'
    # Crawl entry point: the first chapter page of the novel.
    start_urls = ['http://www.hengyan.com/article/378451.aspx']
    # Browser-like headers as a simple anti-bot measure; attached to every
    # request (including the first, via start_requests below).
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'
    }

    def start_requests(self):
        """Issue the initial requests with the anti-bot headers attached.

        Without this override Scrapy would send the first request with its
        default headers, defeating the ``headers`` defined above.
        """
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse, headers=self.headers)

    def parse(self, response):
        """Extract chapter items from the page and follow the next-page link.

        Yields:
            dict: keys ``name`` (novel title, or None if the breadcrumb is
            missing), ``title`` (chapter heading strings) and ``content``
            (list of paragraph strings).
        """
        for contentitem in response.xpath('//div[@class="contentitem"]'):
            # The second <a> in the breadcrumb holds the novel's name; guard
            # against pages with fewer anchors instead of raising IndexError.
            names = contentitem.xpath('div[@class="dh"]/p/a/text()').extract()
            yield {
                "name": names[1] if len(names) > 1 else None,
                "title": contentitem.xpath('div[@class="ch"]/h2/text()').extract(),
                "content": contentitem.xpath('div[@class="content"]/p/text()').extract(),
            }
        # Follow the "next page" link. The last chapter has no such link, so
        # stop cleanly instead of crashing with IndexError on next_urls[0].
        next_urls = response.xpath('//div[@id="getnext"]/a/@href').extract()
        if next_urls:
            # urljoin resolves relative/absolute hrefs correctly and avoids
            # the double-slash bug of naive string concatenation.
            next_url = response.urljoin(next_urls[0])
            self.logger.debug('Next page: %s', next_url)
            yield scrapy.Request(url=next_url, callback=self.parse, headers=self.headers)