import scrapy
from ..items import DaomuItem
import os


class DaomuSpider(scrapy.Spider):
    """Crawl www.daomubiji.com in three hops: volume menu -> chapter list -> chapter text.

    Each volume gets a local output directory under ./data/; every yielded
    DaomuItem carries parent_title (volume), son_title (chapter) and, on the
    final hop, novel_content (the chapter's paragraphs joined by newlines).
    """

    name = "daomu"
    allowed_domains = ["www.daomubiji.com"]
    start_urls = ["https://www.daomubiji.com"]

    def parse(self, response):
        """Parse the home page: one request per volume in the top menu."""
        a_list = response.xpath('//li[contains(@id, "menu-item-2")]/a')
        for a in a_list:
            parent_title = a.xpath('./text()').get()
            parent_href = a.xpath('./@href').get()
            # .get() returns None for malformed entries; skip them rather
            # than crash on scrapy.Request(url=None) or a 'None' folder.
            if not parent_title or not parent_href:
                continue

            item = DaomuItem()
            item['parent_title'] = parent_title

            # Create the per-volume output folder. exist_ok=True avoids the
            # exists()/makedirs() race and re-run failures.
            # NOTE(review): parent_title is used verbatim as a path segment —
            # confirm titles never contain filesystem-unsafe characters.
            os.makedirs('./data/{}/'.format(parent_title), exist_ok=True)

            yield scrapy.Request(url=parent_href, meta={'item': item},
                                 callback=self.parse_second_page)

    def parse_second_page(self, response):
        """Parse a volume page: one request per chapter link in <article>."""
        one_item = response.meta['item']
        a_list = response.xpath('//article/a')
        for a in a_list:
            son_href = a.xpath('./@href').get()
            if not son_href:
                # A missing href would make scrapy.Request raise; skip it.
                continue

            item = DaomuItem()
            item['son_title'] = a.xpath('./text()').get()
            item['parent_title'] = one_item['parent_title']

            yield scrapy.Request(url=son_href, meta={'item': item},
                                 callback=self.parse_third_page)

    def parse_third_page(self, response):
        """Parse a chapter page: join its paragraphs into novel_content."""
        item = response.meta['item']
        p_list = response.xpath('//article/p/text()').getall()
        item['novel_content'] = '\n'.join(p_list)

        yield item
