# -*- coding: utf-8 -*-
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose
from urllib.parse import urljoin

from ..items import NovelItem


class Biquge5200Spider(scrapy.Spider):
    """Spider for www.biquge5200.cc.

    Crawls the site's home-page navigation, follows the category links
    listed in ``temp_1``, and extracts novel metadata from each novel
    detail page into a :class:`NovelItem`.
    """

    name = 'biquge5200'
    allowed_domains = ['www.biquge5200.cc']
    start_urls = ['http://www.biquge5200.cc/']

    # Category names handled by template 1 (parse_temp_1).
    temp_1 = ["玄幻小说", "修真小说", "都市小说", "穿越小说", "网游小说", "科幻小说", "言情小说", "同人小说"]
    # temp_2 = ["排行榜单", "全本小说"]
    # Item fields scraped from a novel detail page; each name must have a
    # matching selector in ``css_config``.
    novel_info = ["novel_name", "novel_cover_url", "novel_desc", "novel_author", "novel_lasted_update",
                  "novel_lasted_chapter_name", "novel_lasted_chapter_url"]
    # CSS selector configuration shared by the parse callbacks.
    css_config = {
        "index_url_href": ".nav ul li a",
        "novel_name": "#info h1::text",
        "novel_cover_url": "#fmimg img::attr(src)",
        "novel_url": "",
        "novel_desc": "#intro::text",
        "novel_author": "#info p:nth-child(2)::text",
        "novel_lasted_update": "#info p:nth-child(4)::text",
        "novel_lasted_chapter_name": "#info p:nth-child(5) a::text",
        "novel_lasted_chapter_url": "#info p:nth-child(5) a::attr(href)",
    }

    def parse(self, response):
        """Parse the home-page navigation bar.

        Categories whose link text appears in ``temp_1`` are followed
        with the template-1 parser; other nav entries are ignored.

        :param response: home-page response
        :return: yields :class:`scrapy.Request` objects
        """
        navs = response.css(self.css_config['index_url_href'])
        for nav in navs:
            nav_name = nav.css('::text').extract_first()
            nav_url = nav.css('::attr(href)').extract_first()
            if nav_name in self.temp_1:
                yield scrapy.Request(url=urljoin(response.url, nav_url), callback=self.parse_temp_1)

    def parse_temp_1(self, response):
        """Parse a template-1 category page.

        Collects novel-detail links from the "hot" grid and the left/right
        news columns, de-duplicates them, and follows each one.

        :param response: category-page response
        :return: yields :class:`scrapy.Request` objects
        """
        # Hot-novel grid
        hot_content_urls = response.css("#hotcontent .ll dt a::attr(href)").extract()
        # Left and right news columns
        news_content_urls = response.css("#newscontent .s2 a::attr(href)").extract()
        # set() removes duplicates that appear in both sections
        for novel_url in set(hot_content_urls + news_content_urls):
            yield scrapy.Request(url=urljoin(response.url, novel_url), callback=self.parse_novel_info)

    def parse_novel_info(self, response):
        """Parse a novel detail page into a ``NovelItem``.

        Bug fix: ``novel_lasted_update`` was previously added with
        ``add_value`` (which stored the selector string literally instead
        of extracting it), and the last-chapter name/url fields were
        filled with blank placeholder strings. All scraped fields now go
        through ``add_css`` using the selectors in ``css_config``.

        :param response: novel-detail-page response
        :return: yields a populated ``NovelItem``
        """
        novel_loader = ItemLoader(item=NovelItem(), response=response)
        novel_loader.default_output_processor = TakeFirst()
        novel_loader.add_value("website_url", self.allowed_domains[0])
        novel_loader.add_value("novel_url", response.url)
        # Every field in novel_info has a selector configured in css_config.
        for field in self.novel_info:
            novel_loader.add_css(field, self.css_config[field])
        yield novel_loader.load_item()
