# -*- coding: utf-8 -*-
import scrapy
from ..items import DaomuItem
import os

class DaomuSpider(scrapy.Spider):
    """Three-level spider for www.daomubiji.com.

    Level 1 (parse): book titles + links from the site menu.
    Level 2 (parse_two_page): chapter titles + links inside a book.
    Level 3 (parse_three_page): the chapter body text itself.

    Items are threaded through the levels via Request.meta so each yielded
    DaomuItem carries parent_title, son_title and novel_content.
    """
    name = 'daomu'
    allowed_domains = ['www.daomubiji.com']
    start_urls = ['http://www.daomubiji.com/']

    def parse(self, response):
        """Level-1 page: extract each book's title and link from the menu."""
        # Menu <li> elements whose id contains "menu-item-20" are the books.
        li_list = response.xpath('//li[contains(@id,"menu-item-20")]')
        for li in li_list:
            item = DaomuItem()
            item['parent_title'] = li.xpath('./a/text()').get()
            parent_href = li.xpath('./a/@href').get()

            # Guard: .get() returns None on a missing node. The original
            # would then create a './novel/None/' directory and crash in
            # scrapy.Request(url=None); skip malformed menu entries instead.
            if not item['parent_title'] or not parent_href:
                continue

            # Pre-create the per-book output directory (presumably consumed
            # by a pipeline that writes chapter files — confirm against the
            # project's pipelines). exist_ok=True replaces the original
            # exists()-then-makedirs() check, which was race-prone.
            directory = './novel/{}/'.format(item['parent_title'])
            os.makedirs(directory, exist_ok=True)

            # Hand the level-2 page back to the scheduler, carrying the item.
            yield scrapy.Request(url=parent_href, meta={'item1': item},
                                 callback=self.parse_two_page)

    def parse_two_page(self, response):
        """Level-2 page: extract each chapter's title and link."""
        item1 = response.meta['item1']
        for art in response.xpath('//article'):
            item = DaomuItem()
            item['son_title'] = art.xpath('./a/text()').get()
            son_href = art.xpath('./a/@href').get()

            # Guard: an <article> without an <a href> has nothing to follow;
            # scrapy.Request(url=None) would raise.
            if son_href is None:
                continue

            # Copy the book title down from the level-1 item.
            item['parent_title'] = item1['parent_title']

            yield scrapy.Request(url=son_href, meta={'item': item},
                                 callback=self.parse_three_page)

    def parse_three_page(self, response):
        """Level-3 page: extract the chapter body and yield the final item."""
        item = response.meta['item']
        # One string per <p> paragraph; join into the full chapter text.
        p_list = response.xpath('//article[@class="article-content"]/p/text()').extract()
        item['novel_content'] = '\n'.join(p_list)
        yield item









