import scrapy
# from t1.items import T1Item

class GuwenPySpider(scrapy.Spider):
    """Crawl the "ershisishi" (Twenty-Four Histories) bookmark on shicimingju.com.

    Three-level crawl:
      1. ``parse``  — bookmark page: one link per book.
      2. ``parse2`` — a book's table of contents: one link per chapter.
      3. ``parse3`` — a chapter page: extract the text and yield one item
         per chapter as ``{'name': book, 'directory': chapter, 'content': text}``.
    """
    name = 'guwen'
    allowed_domains = ['www.shicimingju.com']
    start_urls = ['https://www.shicimingju.com/bookmark/ershisishi.html']

    # Site links are relative ("/book/...") — prepend this to build absolute URLs.
    BASE_URL = 'https://www.shicimingju.com'

    def parse(self, response):
        """Parse the bookmark page and request every listed book's TOC.

        Yields one ``scrapy.Request`` per book; the book title travels in
        ``meta['book']`` so the leaf callback can attach it to the item.
        """
        for node in response.xpath('//div[@class="book-item"]/h3/a'):
            link = node.xpath('./@href').extract_first()
            book_name = node.xpath('./text()').extract_first()
            # Skip malformed entries instead of crashing on extract()[0].
            if link is None or book_name is None:
                continue
            yield scrapy.Request(
                self.BASE_URL + link,
                callback=self.parse2,
                meta={'book': book_name},
            )

    def parse2(self, response):
        """Parse a book's table of contents and request every chapter page.

        Forwards ``[chapter_title, book_title]`` in ``meta['items']``
        (order matters: ``parse3`` reads index 0 as the chapter, 1 as the book).
        """
        book = response.meta['book']
        for node in response.xpath('//div[@class="book-mulu"]//li/a'):
            link = node.xpath('./@href').extract_first()
            directory = node.xpath('./text()').extract_first()
            if link is None or directory is None:
                continue
            yield scrapy.Request(
                self.BASE_URL + link,
                callback=self.parse3,
                meta={'items': [directory, book]},
            )

    def parse3(self, response):
        """Parse one chapter page and yield the assembled item dict.

        Normally the text lives in ``<p>`` children of the chapter div,
        joined with newlines; some pages keep bare text nodes directly
        under the div, so fall back to those when no paragraphs exist.
        """
        paragraphs = response.xpath(
            '//div[@class="chapter_content"]/p/text()').extract()
        if paragraphs:
            content = '\n'.join(paragraphs)
        else:
            # Fallback layout: text nodes directly inside the chapter div.
            content = ''.join(response.xpath(
                '//div[@class="chapter_content"]/text()').extract())

        directory, book = response.meta['items']
        # Plain dict item (keys match the original output schema).
        yield {
            'name': book,
            'directory': directory,
            'content': content,
        }

