import scrapy
from scrapy import Request
from fiction_Scrapy.items import FictionScrapyItem
import random
import time

class fictionSpider(scrapy.Spider):
    """Spider that reads a novel's table of contents, then fetches each
    chapter sequentially in reading order, yielding one item per chapter."""

    name = "fictionSpider"

    # Pending chapter links, each a dict {'title': ..., 'url': ...}.
    # Consumed front-to-back so chapters are scraped in document order.
    link_list = []

    # Entry point: the novel's table-of-contents page.
    start_urls = ['https://www.dxmwx.org/chapter/10000.html']

    def parse(self, response):
        """Collect every chapter link from the table of contents, then
        issue a request for the first chapter."""
        alist = response.xpath("//div[@style=' height:40px; line-height:40px; border-bottom: 1px dotted #ccc; overflow:hidden; font-size:14px;']/span/a")
        for a_element in alist:
            title = a_element.xpath("text()").get()
            href = a_element.xpath("@href").get()
            # Guard against malformed anchors instead of raising IndexError
            # (original used extract()[0], which crashes on empty results).
            if not title or not href:
                continue
            self.link_list.append({
                'title': title,
                'url': 'https://www.dxmwx.org' + href,
            })
        if not self.link_list:
            # Nothing matched — the page layout may have changed; stop
            # gracefully rather than crashing on pop() of an empty list.
            return
        first = self.link_list.pop(0)
        yield Request(url=first['url'], callback=self.parse_content,
                      meta={'title': first['title']}, dont_filter=True)

    def parse_content(self, response):
        """Extract one chapter's title and text, then chain a request for
        the next pending chapter (if any)."""
        # Use a local item rather than storing it on `self`: per-request
        # state on the spider instance is shared across concurrent requests.
        item = FictionScrapyItem()
        # BUG FIX: Response objects are not subscriptable; the request
        # metadata is exposed as the `meta` attribute.
        item['title'] = response.meta['title']
        item['content'] = response.xpath('//*[@id="Lab_Contents"]/text()').extract()
        yield item
        if self.link_list:
            next_link = self.link_list.pop(0)
            # NOTE(review): time.sleep blocks Scrapy's reactor entirely;
            # DOWNLOAD_DELAY + RANDOMIZE_DOWNLOAD_DELAY in settings is the
            # idiomatic way to pace requests. Kept here to preserve the
            # original 3-5 s pacing behavior.
            time.sleep(random.uniform(3, 5))
            # BUG FIX: the original passed str(dict) as the URL and dropped
            # the chapter title; pass the 'url' field and carry the title
            # through meta so parse_content can read it on the next hop.
            yield Request(url=next_link['url'], callback=self.parse_content,
                          meta={'title': next_link['title']}, dont_filter=True)