import scrapy
from AboutNovel.items import AboutnovelItem


class AboutNovelSpider(scrapy.Spider):
    """Crawl a paginated novel-category listing and each novel's detail page.

    Depth: every listing entry spawns a follow-up request handled by
    ``parse_detail``. Pagination: ``parse`` re-schedules itself for the
    next listing page until page 10.
    """
    name = 'about_novel'
    # allowed_domains = ['www.cc.com']
    start_urls = ['http://www.xbiquge.la/fenlei/1_1.html']  # first listing page

    # URL template for subsequent listing pages; the counter starts at 2
    # because start_urls already covers page 1.
    demo_url = 'http://www.xbiquge.la/fenlei/1_{}.html'
    page = 2

    def parse(self, response):
        """Parse one listing page: one detail request per novel, then the next page.

        :param response: listing-page response from ``start_urls`` or a
            previously scheduled pagination request.
        :yields: ``scrapy.Request`` objects (detail pages + next listing page).
        """
        for li in response.xpath('//*[@id="newscontent"]/div[1]/ul/li'):
            # .get()/.getall() return None / a list instead of raising
            # IndexError when a selector matches nothing, so a malformed
            # row cannot abort the whole callback.
            novel_name = "".join(li.xpath('./span[@class="s2"]//text()').getall())
            novel_author = li.xpath('.//span[@class="s5"]/text()').get()
            novel_url = li.xpath('./span[@class="s2"]/a/@href').get()
            if not novel_url:
                # Row without a detail link cannot be followed; skip it.
                continue

            item = AboutnovelItem()
            item["novel_name"] = novel_name
            item["novel_author"] = novel_author

            # Request the detail page, passing the partially-filled item
            # along so parse_detail can complete and yield it.
            yield scrapy.Request(novel_url, callback=self.parse_detail, meta={'item': item})

        # Schedule the next listing page (pages 2..9).
        if self.page < 10:
            print('下载第%d页' % self.page)
            url = self.demo_url.format(self.page)
            self.page += 1
            yield scrapy.Request(url, callback=self.parse)

    def parse_detail(self, response):
        """Parse a novel detail page and emit the completed item.

        :param response: detail-page response carrying the item in ``meta``.
        :yields: the ``AboutnovelItem`` with ``novel_detail`` filled in
            (``None`` when the intro paragraph is absent).
        """
        # .get() avoids IndexError when the intro paragraph is missing.
        novel_detail = response.xpath('//*[@id="intro"]/p[2]/text()').get()
        # Retrieve the item handed over by parse().
        item = response.meta["item"]
        item["novel_detail"] = novel_detail

        # Hand the finished item to the item pipeline.
        yield item
