# -*- coding: utf-8 -*-
import re

import scrapy
from novel_download_more_source.items import NovelDownloadMoreSourceItem

import novel_source_data as nsd


class NovelSpider(scrapy.Spider):
    """Spider that downloads a novel chapter-by-chapter from a configured source.

    Per-source configuration (XPaths, base URLs, boilerplate strings to strip)
    lives in ``novel_source_data.scrapy_source``.  The concrete source key and
    the novel's index URL are supplied at crawl time, e.g.::

        scrapy crawl novel -a par="<source>|<index-url>"
    """

    name = 'novel'
    # Accept every domain configured in the source table.
    allowed_domains = [nsd.scrapy_source[x]['s_domain'] for x in nsd.scrapy_source.keys()]
    # Filled in by parse() with the novel's title.
    novel_name = ''

    def __init__(self, par=None, *args, **kwargs):
        """Initialize from ``par`` ("<source>|<url>").

        :param par: pipe-separated source key and chapter-index URL.
        :raises ValueError: if ``par`` is missing or has no '|' separator
            (previously this surfaced as an opaque AttributeError).
        """
        super(NovelSpider, self).__init__(*args, **kwargs)
        if not par or '|' not in par:
            raise ValueError("par must be '<source>|<url>', got: %r" % (par,))
        # Split only once; maxsplit=1 keeps any '|' inside the URL intact.
        self.source, self.url = par.split('|', 1)

    def start_requests(self):
        """Start the crawl at the novel's chapter-index page."""
        yield scrapy.Request(self.url, callback=self.parse)

    def parse(self, response):
        """Parse the index page: record the title, schedule every chapter."""
        cfg = nsd.scrapy_source[self.source]

        # Novel title.
        self.novel_name = response.xpath(cfg['s_name_xpath']).get()

        # Chapter URLs as listed on the index page (may be relative).
        chapter_urls = response.xpath(cfg['s_url_xpath']).getall()

        # Request each chapter, passing its 1-based position via cb_kwargs so
        # chapters can be reassembled in reading order downstream.
        for num, url in enumerate(chapter_urls, start=1):
            # Some sources list URLs relative to the novel's own index URL,
            # others relative to a fixed site base URL.
            base = self.url if cfg['s_self_url'] else cfg['s_base_url']
            full_url = base + cfg['s_other_str'] + url
            yield scrapy.Request(full_url, callback=self.parse_info,
                                 cb_kwargs={'num': num}, dont_filter=True)

    def parse_info(self, response, num):
        """Parse one chapter page into an item (number, title, cleaned body).

        :param num: 1-based chapter position assigned by parse().
        """
        cfg = nsd.scrapy_source[self.source]

        item = NovelDownloadMoreSourceItem()
        item['num'] = num
        item['chapter_name'] = response.xpath(cfg['s_chapter_name_xpath']).get()

        # Raw chapter text, one extracted fragment per line.
        content = '\n'.join(response.xpath(cfg['s_chapter_content_xpath']).getall()).strip()

        # Strip source-specific boilerplate (ads, site names, ...).
        for junk in cfg['s_replace']:
            content = content.replace(junk, '')

        item['chapter_content'] = content

        yield item
