# -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import Selector
# NOTE: import as parent_package.settings rather than ..settings — relative
# imports like this misbehave under frameworks such as Django and Scrapy.
from what.settings import memory_dict

class BiqugeSpider(scrapy.Spider):
    """Crawl a novel's table of contents on biquge.info, fetch every chapter,
    and store each chapter's text in the shared ``memory_dict`` keyed by the
    chapter number parsed from the TOC title.  On shutdown, ``close`` dumps the
    collected chapters to a local text file in numeric order.
    """

    name = 'biquge'
    start_urls = ['https://www.biquge.info/34_34370/']

    def parse(self, response):
        """Parse the table-of-contents page and schedule one request per chapter.

        Each yielded :class:`scrapy.Request` carries the chapter number in
        ``meta['number']`` so ``parse_detail`` can key the stored content.
        """
        selector = Selector(response)
        sections = selector.xpath(
            "/html/body/div[@id='wrapper']/div[@class='box_con'][2]/div[@id='list']/dd"
            .replace('/dd', '/dl/dd')  # keep the original full path intact
        ) if False else selector.xpath(
            "/html/body/div[@id='wrapper']/div[@class='box_con'][2]/div[@id='list']/dl/dd"
        )
        for section in sections:
            href = section.xpath('.//@href').extract_first()
            title = section.xpath('.//@title').extract_first()
            if not title:
                # The first <dd> on the page has no title attribute; treat it
                # as chapter 0 so it still gets a numeric key.
                number = 0
            else:
                # Titles look like "0123：chapter name" — keep the numeric
                # prefix and drop leading zeros.  Fall back to '0' when the
                # prefix is all zeros, otherwise int('') would raise later.
                number = title.split('：')[0].lstrip('0') or '0'
            request = scrapy.Request(response.urljoin(href), callback=self.parse_detail)
            request.meta['number'] = number
            yield request

    def parse_detail(self, response):
        """Extract a chapter's text and record it in ``memory_dict``.

        Chapter ids embedded in the URL are not strictly sequential (they jump
        around chapter 293), so the number parsed from the TOC title is used as
        the dict key instead of anything derived from the URL.
        """
        number = response.meta['number']
        selector = Selector(response)
        content = '\n'.join(
            selector.xpath('//div[@id="content"]/text()').extract()
        )
        try:
            memory_dict[int(number)] = content
        except (TypeError, ValueError):
            # A non-numeric title prefix would previously crash the callback;
            # log and skip storing rather than kill the whole crawl.
            self.logger.warning(
                'unparseable chapter number %r for %s', number, response.url
            )

        item = dict()
        item['content'] = content
        print(content)
        yield item

    def close(spider, reason):
        """Scrapy shutdown hook: write collected chapters to disk in order.

        Scrapy invokes this with the spider instance as the first argument,
        so ``spider`` plays the role of ``self`` here.
        """
        print(memory_dict.keys())
        with open('D://女帝直播攻略.txt', 'a+', encoding='utf-8') as fp:
            for i in range(1600):  # upper bound on how many chapters to save
                try:
                    fp.write(str(i) + '\n' + memory_dict[i])
                except KeyError:
                    # Chapter missing (failed download or gap in numbering) —
                    # skip it; a bare except here used to hide real errors too.
                    pass
