import os.path

import scrapy
from ..items import DaomuItem


class DaomuSpider(scrapy.Spider):
    """Crawl www.daomubiji.com: series index -> chapter list -> article text.

    Three-level crawl; a partially-filled DaomuItem travels through
    ``response.meta['item']`` and is completed in :meth:`three_pase`.
    """

    name = "daomu"
    allowed_domains = ["www.daomubiji.com"]
    start_urls = ["https://www.daomubiji.com/"]

    def parse(self, response, *args, **kwargs):
        """Parse the top-level index page.

        Yields one ``scrapy.Request`` per series link, carrying an item with
        ``parent_title`` set, scheduled for :meth:`parse_second_pase`.
        """
        a_list = response.xpath("//li[contains(@id,'menu-item-20')]/a")
        # Use the spider logger instead of a bare print() debug leftover.
        self.logger.debug("series links found: %s", a_list)
        for a in a_list:
            item = DaomuItem()
            item['parent_title'] = a.xpath("./text()").get()
            # Pre-create the output directory for this series.
            # exist_ok=True avoids the check-then-create race the original
            # `if not os.path.exists(...)` pattern had.
            directory = './novel/{}/'.format(item['parent_title'])
            os.makedirs(directory, exist_ok=True)
            # Second-level (chapter list) page link.
            parent_href = a.xpath('./@href').get()

            # Hand the next page back to the scheduler.
            yield scrapy.Request(url=parent_href, meta={'item': item}, callback=self.parse_second_pase)

    def parse_second_pase(self, response):
        """Parse a chapter-list page; yield one Request per article link."""
        meta1 = response.meta['item']
        second_list = response.xpath('//article/a')
        for a in second_list:
            # Fresh item per chapter so concurrent requests never share
            # (and overwrite) the same item instance.
            item = DaomuItem()
            item['son_title'] = a.xpath('./text()').get()
            item['parent_title'] = meta1['parent_title']
            three_url = a.xpath('./@href').get()

            # Third-level page holds the article body text.
            yield scrapy.Request(url=three_url, meta={'item': item}, callback=self.three_pase)

    def three_pase(self, response):
        """Parse an article page; fill ``novel_content`` and yield the item."""
        item = response.meta['item']
        # extract() returns ALL matching text nodes; get() would return
        # only the first paragraph.
        p_list = response.xpath('//article/p/text()').extract()
        item['novel_content'] = '\n'.join(p_list)

        # Pass the completed item on to the pipeline.
        yield item