import scrapy
from Novel.items import NovelItem


class NovelSpider(scrapy.Spider):
    """Crawl chapter titles and bodies from a novel's index page.

    Demonstrates passing data between callbacks via request ``meta``
    (suitable for multi-level "deep" crawls): the index-page callback
    builds a partially-filled item and the detail-page callback
    completes and yields it.
    """
    name = 'novel'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://www.xbiquge.la/14/14930/']  # novel index page

    def parse(self, response):
        """Parse chapter names and URLs from the index page.

        Yields one :class:`scrapy.Request` per chapter; the item with the
        chapter name rides along in ``meta`` so ``parse_content`` can
        finish filling it in.
        """
        for dd in response.xpath('//div[@id="list"]/dl/dd'):
            chapter_name = dd.xpath('./a/text()').get()
            href = dd.xpath('./a/@href').get()
            # Skip malformed <dd> entries instead of crashing with the
            # IndexError a bare [0] lookup would raise.
            if not chapter_name or not href:
                continue
            # urljoin resolves the href against the response URL, so both
            # relative and absolute links work.
            chapter_url = response.urljoin(href)

            item = NovelItem()
            item["chapter_name"] = chapter_name

            # Manually issue the detail-page request; the partially-filled
            # item is passed to the callback through the meta dict.
            yield scrapy.Request(chapter_url, callback=self.parse_content,
                                 meta={'item': item})

    def parse_content(self, response):
        """Parse the chapter body text from a chapter detail page.

        Retrieves the item stashed in ``response.meta`` by :meth:`parse`,
        attaches the chapter content, and yields the completed item to
        the pipelines.
        """
        lines = response.xpath('//*[@id="content"]//text()').getall()
        # The last two text nodes are site boilerplate, not chapter text.
        content = ''.join(lines[:-2])

        # Recover the item passed in via the request's meta dict.
        item = response.meta['item']
        item["chapter_content"] = content

        # Hand the completed item to the item pipelines.
        yield item