# -*- coding: utf-8 -*-
import logging
import copy
import re

import scrapy
from scrapy import Selector

from moose_poetry_spider.items import MoosePoetryAuthorBaseInfoItem

'''
诗人信息
'''


class MoosePoetryAuthorBaseInfoSpider(scrapy.Spider):
    """Crawl poet (author) base information from shicimingju.com.

    ``parse`` walks the paginated author list ("/category/all"); each
    author card yields a request to the author's detail page, and
    ``parse_detail`` fills in the remaining fields before yielding the
    completed ``MoosePoetryAuthorBaseInfoItem``.
    """

    name = 'MoosePoetryAuthorBaseInfoSpider'

    allowed_domains = ['www.shicimingju.com']

    start_urls = ['https://www.shicimingju.com/category/all']

    prefix_url = "https://www.shicimingju.com"

    # Strips HTML tags from a fragment; compiled once at class level
    # instead of on every parse()/parse_detail() call.
    _TAG_PATTERN = re.compile(r'<[^>]+>', re.S)

    def _clean(self, html):
        """Drop HTML tags, spaces and newlines from an HTML fragment."""
        return self._TAG_PATTERN.sub("", html).replace(" ", "").replace("\n", "")

    def parse(self, response):
        """Parse one page of the author list.

        Yields one detail-page Request per author card (carrying the
        partially filled item in ``meta['author_base_info']``) plus a
        Request for the next list page when one exists.
        """
        poetry_list = response.xpath(
            '//div[@id="main_left"]/div[@class="card zuozhe_card"]').extract()
        for poetry_item_str in poetry_list:
            # BUGFIX: build a fresh item for every author card. The original
            # reused one item across iterations, so a card missing
            # 'introduction' or 'magnum_opus' silently inherited the
            # previous author's value.
            author_base_info = MoosePoetryAuthorBaseInfoItem()
            # Build the per-card selector once instead of three times.
            card = Selector(text=poetry_item_str)

            html_introduction = card.xpath(
                '//div[@class="zuozhe_list_item"]/div[@class="zuozhe_list_des"]').extract_first()
            if html_introduction is not None:
                author_base_info['introduction'] = self._clean(html_introduction)

            magnum_opus_html = card.xpath(
                '//div[@class="zuozhe_good_shici_div"]').extract_first()
            if magnum_opus_html is not None:
                author_base_info['magnum_opus'] = self._clean(magnum_opus_html)

            author_detail_url = card.xpath(
                '//div[@class="zuozhe_list_item"]/h3/a/@href').extract_first()
            if author_detail_url is not None:
                author_detail_url = f"{self.prefix_url}{author_detail_url}"
                author_base_info['detail_url'] = author_detail_url
                yield scrapy.Request(
                    author_detail_url,
                    callback=self.parse_detail,
                    # The item is fresh per iteration, so the deepcopy the
                    # original needed is no longer required.
                    meta={'author_base_info': author_base_info},
                    dont_filter=True
                )

        # Follow pagination: the anchor labelled "下一页" ("next page").
        next_url = response.xpath(
            "//div[@id='main_left']/div[@id='list_nav']/div[@id='list_nav_part']/a[text()='下一页']/@href").extract_first()
        if next_url is not None:
            yield scrapy.Request(
                f"{self.prefix_url}{next_url}",
                callback=self.parse,
                dont_filter=True
            )

    def parse_detail(self, response):
        """Parse an author's detail page and yield the completed item.

        Completes the item passed through ``response.meta`` with name,
        detail text, dynasty and the collected-count.
        """
        author_base_info = response.meta['author_base_info']

        author_base_info['author_name'] = response.xpath(
            '//div[@id="main_right"]/div[@class="card about_zuozhe"]/div[2]/div/h4/a/text()').extract_first()

        detail_info_html = response.xpath(
            '//div[@id="main_right"]/div[@class="card about_zuozhe"]/div[2]/div/div').extract_first()
        if detail_info_html is not None:
            author_base_info['detail_info'] = self._clean(detail_info_html)

        author_base_info['dynasty'] = response.xpath(
            '//div[@id="main_right"]/div[@class="card about_zuozhe"]/div[3]/div[@class="aside_left"]/div[@class="aside_val"]/a/text()').extract_first()

        collected_str = response.xpath(
            '//div[@id="main_right"]/div[@class="card about_zuozhe"]/div[3]/div[@class="aside_right"]/div[@class="aside_val"]/a/text()').extract_first()
        if collected_str is not None:
            # Keep digits only, e.g. a "collected N times" label -> N.
            author_base_info['collected'] = int("".join(re.findall(r'\d', collected_str)))

        yield author_base_info
