# -*- coding: utf-8 -*-
import logging

import scrapy
from scrapy import Selector
import re

from pymongo import MongoClient

from moose_poetry_spider.items import MoosePoetryItem

'''
诗人对应诗词信息
Spider that scrapes, for each poet stored in MongoDB, the poems listed
on the poet's detail pages at shicimingju.com (following pagination).
'''


class MoosePoetryAuthorSpider(scrapy.Spider):
    """Crawl the poems belonging to each poet.

    Poet base records (each holding a ``detail_url``) are read from the
    MongoDB collection ``poetry_author_base_info``; every poet page is
    scraped (pagination is followed) and one ``MoosePoetryItem`` is
    yielded per poem found.
    """

    name = 'MoosePoetryAuthorSpider'

    allowed_domains = ['www.shicimingju.com']

    start_urls = []

    # Site root, used to absolutize the relative pagination hrefs.
    prefix_url = 'https://www.shicimingju.com'

    # Matches any HTML tag; compiled once at class level instead of
    # inside the innermost parse loop.
    _TAG_RE = re.compile(r'<[^>]+>', re.S)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and attach a MongoDB database handle.

        Reads ``MONGO_URI`` and ``MONGO_DATABASE`` (default ``'moose'``)
        from the crawler settings.
        """
        spider = super(MoosePoetryAuthorSpider, cls).from_crawler(crawler, *args, **kwargs)
        # BUG FIX: the original line ended with a stray trailing comma,
        # turning mongo_uri into a 1-tuple ('(uri,)') that was then
        # handed to MongoClient.
        mongo_uri = crawler.settings.get('MONGO_URI')
        mongo_database = crawler.settings.get('MONGO_DATABASE', 'moose')
        client = MongoClient(mongo_uri)
        spider._db = client[mongo_database]
        return spider

    def start_requests(self):
        """Issue one request per poet, carrying its base record in meta."""
        for author in self._db['poetry_author_base_info'].find():
            detail_url = author['detail_url']
            # Use the spider logger instead of a bare print().
            self.logger.info('crawling author page: %s', detail_url)
            yield scrapy.Request(detail_url, dont_filter=True,
                                 meta={"author_base_info": author})

    def parse(self, response):
        """Extract every poem card on the page and follow the next-page link."""
        author_base_info = response.meta['author_base_info']

        # One <div> per poem card under the shici_card container.
        poetry_content_list = response.xpath(
            "//div[@id='main_left']/div[@class='card shici_card']/div").extract()
        for content_item in poetry_content_list:
            # BUG FIX: a fresh item per poem. The original reused one
            # mutable item across yields, so fields set for an earlier
            # poem (e.g. ranking_num) leaked into later poems that
            # lacked them.
            poetry_item = MoosePoetryItem()
            poetry_item['author_id'] = author_base_info['_id']

            selector = Selector(text=content_item)

            # Ranking number, when the card carries one.
            list_num_info = selector.xpath(
                "//div/div[@class='list_num_info']/text()").extract_first()
            if list_num_info is not None:
                poetry_item["ranking_num"] = int(list_num_info.strip())

            # Poem title.
            poetry_name = selector.xpath(
                "//div/div[@class='shici_list_main']/h3/a/text()").extract_first()
            if poetry_name is not None:
                poetry_item["poetry_name"] = poetry_name

            # Poem body: strip tags and site chrome, then hand to pipeline.
            poetry_content_more = selector.xpath(
                "//div/div[@class='shici_list_main']/div").extract()
            for item in poetry_content_more:
                html = Selector(text=item).xpath(
                    "//div[@class='shici_content']").extract_first()
                if html is not None:
                    poetry_item['poetry_content'] = self._clean_content(html)
                    yield poetry_item

        # Follow pagination: the anchor labelled '下一页' ("next page").
        next_url = response.xpath(
            "//div[@id='main_left']/div[@id='list_nav']/div[@id='list_nav_part']/a[text()='下一页']/@href").extract_first()
        if next_url is not None:
            yield scrapy.Request('{}{}'.format(self.prefix_url, next_url),
                                 callback=self.parse,
                                 dont_filter=True,
                                 meta={"author_base_info": author_base_info})

    @classmethod
    def _clean_content(cls, html):
        """Return plain poem text: drop HTML tags, the site's
        expand/collapse labels ('展开全文' / '收起'), and all whitespace."""
        return (cls._TAG_RE.sub("", html)
                .replace("展开全文", "")
                .replace("收起", "")
                .replace("\n", "")
                .replace("\r", "")
                .replace(" ", ""))
