import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from Sun.items import SunItem, DetailItem


class SunSpider(CrawlSpider):
    """
    Full-site scrape built on CrawlSpider + LinkExtractor/Rule.

    Crawls the fantasy-novel section of xbiquge.la and yields two item
    streams: a ``SunItem`` (name, author, id) per novel found on the
    listing pages, and a ``DetailItem`` (id, synopsis) per novel detail
    page.  Both carry the same site-derived ``novel_id`` so a pipeline
    can join them later — responses are downloaded asynchronously, so
    arrival order cannot be relied on.
    """
    name = 'sun'
    # allowed_domains = ['www.xx.com']
    start_urls = ['http://www.xbiquge.la/xuanhuanxiaoshuo/']

    # LinkExtractor: collects links that match the given rule (here an
    # `allow` regex; other constructor arguments select links by other
    # criteria such as tags or CSS).
    link = LinkExtractor(allow=r'1_\d+\.html')       # pagination links: http://www.xbiquge.la/fenlei/1_xxx.html
    link_detail = LinkExtractor(allow=r'/\d+/\d+/')  # detail pages:     http://www.xbiquge.la/68/68382/

    # Rule: every link the extractor finds is fetched and passed to the
    # named callback for parsing.
    rules = (
        # follow=True would re-apply the extractor to each fetched page
        # (i.e. crawl the whole site); this spider deliberately keeps
        # follow=False and only walks the links found on the start page.
        Rule(link, callback='parse_item', follow=False),
        # Detail pages contain no further links of interest — no follow.
        Rule(link_detail, callback='parse_detail', follow=False),
    )

    def parse_item(self, response):
        """Yield one SunItem (author, name, id) per novel in a listing page.

        NOTE(review): span.s2 holds the novel's link (see the href read
        below), yet its text is stored as the *author* while span.s5 text
        is stored as the *name* — these labels may be swapped; verify
        against the live markup.
        """
        for li in response.xpath('//*[@id="newscontent"]/div[1]/ul/li'):
            novel_author = ''.join(li.xpath('./span[@class="s2"]//text()').extract())
            novel_name = li.xpath('./span[@class="s5"]/text()').extract_first()
            # The novel URL looks like .../<category>/<book>/ ; fuse the
            # last two path components into a stable id.  extract_first('')
            # avoids the IndexError that subscripting an empty SelectorList
            # would raise on a malformed row (yields an empty id instead).
            href = li.xpath('./span[@class="s2"]/a/@href').extract_first('')
            novel_id = ''.join(href.split('/')[-3:-1])
            item = SunItem()
            item['novel_author'] = novel_author
            item['novel_name'] = novel_name
            item['novel_id'] = novel_id
            yield item

    def parse_detail(self, response):
        """Yield a DetailItem (id, synopsis) for one novel detail page.

        The id is recomputed from the response URL (same last-two-path-
        components scheme as parse_item) so the two item streams can be
        matched up during storage despite asynchronous download order.
        """
        novel_id = ''.join(response.url.split('/')[-3:-1])
        novel_detail = response.xpath('//*[@id="intro"]/p[2]/text()').extract_first()
        item = DetailItem()
        item['novel_id'] = novel_id
        item['novel_detail'] = novel_detail
        yield item
