# -*- coding: utf-8 -*-
from urllib.parse import urlparse

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule

"""
爬取一部小说

参数：

:param idx     书号，例如，http://www.biquyun.com/14_14055/ 对应的书号为 14_14055

"""


# Sort key: order extracted link objects by their URL string.
def sort(elem):
    """Return the ``url`` attribute of *elem*, for use as a sort key."""
    url = elem.url
    return url


class IndexSpider(scrapy.spiders.CrawlSpider):
    """Crawl one novel's chapter pages from biquyun.com.

    Command-line parameters (passed with ``scrapy crawl index -a ...``):

    :param idx:       book id, e.g. ``14_14055`` for http://www.biquyun.com/14_14055/
    :param start_idx: chapter number to resume from; chapters whose number is
                      less than or equal to it are skipped (default 0)
    """

    name = 'index'
    allowed_domains = ['biquyun.com']
    start_urls = ['http://www.biquyun.com/14_14055/']

    rules = [
        Rule(LinkExtractor(allow=('/14_14055/',)), follow=True, callback='parse_item')
    ]

    # Book id (overwritten from the -a idx=... command-line argument).
    idx = None
    # First chapter already fetched; chapters numbered <= start_idx are skipped.
    start_idx = 0

    # Custom settings
    # custom_settings = {
    #     'CONCURRENT_REQUESTS': 1,  # concurrency; 1 = serial processing
    # }

    def start_requests(self):
        """Validate command-line arguments and build the start URL for the book."""
        self.idx = getattr(self, 'idx', None)
        # Explicit raise instead of assert: asserts are stripped under
        # ``python -O`` and must not be used to validate required input.
        if self.idx is None:
            raise ValueError("param [idx] can not be null")

        # getattr may return either the int class default (0) or a string from
        # the command line.  ``int(x, base=10)`` raises TypeError for an int
        # argument, so normalise through str() first.
        self.start_idx = int(str(getattr(self, 'start_idx', 0)), base=10)

        self.start_urls = ['http://www.biquyun.com/' + self.idx + '/']
        # NOTE(review): CrawlSpider compiles ``rules`` during __init__, so
        # reassigning them here has no effect on link following; crawling is
        # actually driven by the overridden parse() below.
        self.rules = [
            Rule(LinkExtractor(allow=('/' + self.idx + '/',)), follow=False, callback='parse_item')
        ]
        return super().start_requests()

    def parse(self, response):
        """Extract chapter links from the index page and request unread chapters."""
        if response.status == 200:
            links = LinkExtractor(allow=('/' + self.idx + '/',)).extract_links(response)
            # Sort links by URL, descending.
            links.sort(key=sort, reverse=True)
            for link in links:
                # URL shape: .../<idx>/<chapter>.html -> bare chapter number.
                chapter = link.url.split(self.idx + '/')[1].replace(".html", "")
                if not chapter.isnumeric():
                    continue
                # Skip chapters already fetched in a previous run.
                if int(chapter, base=10) <= self.start_idx:
                    continue
                yield scrapy.Request(url=link.url, dont_filter=True, callback=self.parse_item)

    def parse_item(self, response):
        """Yield one item per chapter page: its chapter id and page title."""
        item = dict()
        # URL shape: http://www.biquyun.com/<book>/<chapter>.html -> index 4.
        item["idx"] = response.url.split('/')[4]
        item["title"] = response.css("title::text").extract_first()
        yield item
