# -*- coding: utf-8 -*-
import re

import scrapy
from scrapy import Selector

from pymongo import MongoClient

from moose_poetry_spider.items import MoosePoetryCategoryItem

'''
Crawl the poems listed under each poetry category page.
(分类下对应诗词)
'''


class MoosePoetryCategorySpider(scrapy.Spider):
    """Crawl every poem listed under each category page on shicimingju.com.

    Category pages are read from the ``poetry_category_info`` MongoDB
    collection (populated by an earlier spider); each poem found is yielded
    as a ``MoosePoetryCategoryItem``.
    """

    name = 'MoosePoetryCategorySpider'

    allowed_domains = ['www.shicimingju.com']

    start_urls = []

    prefix_url = 'https://www.shicimingju.com'

    # Strips HTML tags when extracting the poem body text; compiled once
    # instead of inside the per-poem loop.
    _tag_pattern = re.compile(r'<[^>]+>', re.S)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and attach a MongoDB handle from crawler settings.

        Reads ``MONGO_URI`` and ``MONGO_DATABASE`` (default ``'moose'``)
        from the crawler settings.
        """
        spider = super(MoosePoetryCategorySpider, cls).from_crawler(crawler, *args, **kwargs)
        # BUG FIX: the original line ended with a stray comma, which turned
        # mongo_uri into a 1-tuple and passed a bogus host to MongoClient.
        mongo_uri = crawler.settings.get('MONGO_URI')
        mongo_database = crawler.settings.get('MONGO_DATABASE', 'moose')
        # Keep the client so it can be closed when the spider finishes.
        spider._mongo_client = MongoClient(mongo_uri)
        spider._db = spider._mongo_client[mongo_database]
        return spider

    def closed(self, reason):
        """Scrapy hook: release the MongoDB connection on spider close."""
        client = getattr(self, '_mongo_client', None)
        if client is not None:
            client.close()

    def start_requests(self):
        """Issue one request per category document stored in MongoDB."""
        for doc in self._db['poetry_category_info'].find():
            url = '{}{}'.format(self.prefix_url, doc['category_detail_url'])
            # dont_filter: category URLs may repeat across runs/pages.
            yield scrapy.Request(url, dont_filter=True, meta={"category_item": doc})

    def parse(self, response):
        """Extract every poem on a category page and follow pagination.

        Yields one ``MoosePoetryCategoryItem`` per poem body found.
        """
        category_item = response.meta['category_item']
        poetry_content_list = response.xpath(
            "//div[@id='main_left']/div[@class='card shici_card']/div").extract()
        for content_item in poetry_content_list:
            # Build the per-poem selector once instead of re-parsing the
            # same HTML fragment for each field.
            selector = Selector(text=content_item)
            # BUG FIX: a fresh item per poem. The original reused one
            # mutable item for the whole page, so dynasty/author_name from
            # a previous poem leaked into poems that lack author info.
            poetry_category = MoosePoetryCategoryItem()
            poetry_category['category_id'] = category_item['_id']
            # Parse author info; when present the texts are
            # [list number, dynasty, author name].
            author_info = selector.xpath(
                "//div/div[@class='list_num_info']/text()").extract()
            if len(author_info) == 3:
                poetry_category['dynasty'] = author_info[1].replace(" ", "").replace("\n", "")
                poetry_category['author_name'] = author_info[2].replace(" ", "").replace("\n", "")
            # Parse the poem title.
            poetry_category['poetry_name'] = selector.xpath(
                "//div/div[@class='shici_list_main']/h3/a/text()").extract_first()
            # Parse the poem body: strip tags, the "expand"/"collapse" UI
            # labels, and all whitespace.
            poetry_content_more = selector.xpath(
                "//div/div[@class='shici_list_main']/div").extract()
            for item in poetry_content_more:
                html = Selector(text=item).xpath("//div[@class='shici_content']").extract_first()
                if html is not None:
                    poetry_content = self._tag_pattern.sub("", html) \
                        .replace("展开全文", "").replace("收起", "").replace("\n", "").replace("\r", "").replace(" ", "")
                    poetry_category['poetry_content'] = poetry_content
                    yield poetry_category
        # Follow the "next page" (下一页) link, if any.
        next_url = response.xpath(
            "//div[@id='main_left']/div[@id='list_nav']/div[@id='list_nav_part']/a[text()='下一页']/@href").extract_first()
        if next_url is not None:
            yield scrapy.Request('{}{}'.format(self.prefix_url, next_url), callback=self.parse,
                                 dont_filter=True, meta={"category_item": category_item})
