# -*- coding: utf-8 -*-
import copy
import re

import scrapy
from scrapy import Selector

from moose_poetry_spider.items import MoosePoetryCategoryInfoItem


class MoosePoetryCategoryInfoSpider(scrapy.Spider):
    """Spider that collects poetry-category metadata from shicimingju.com.

    ``parse`` walks the "all categories" list page, builds one item per
    category, and follows each category link; ``parse_detail`` reads the
    poem count from the detail page header and yields the completed item.
    """

    name = 'MoosePoetryCategoryInfoSpider'

    # Base URL prepended to the relative category links found on the page.
    prefix_url = 'https://www.shicimingju.com'

    allowed_domains = ['www.shicimingju.com']

    start_urls = ['https://www.shicimingju.com/category/all']

    # Pinyin slug in a category link, e.g. "/shicimark/aiqing.html" -> "aiqing".
    # Compiled once (not per iteration); the ".html" dot is escaped so it
    # cannot match an arbitrary character.
    _EN_NAME_RE = re.compile(r'/shicimark/(.*)\.html')

    def parse(self, response):
        """Parse the category-list page and schedule one detail request per category.

        A fresh item is created for every <li>: the previous version reused a
        single item across iterations, so a stale ``category_type_en_name``
        from an earlier category could leak into a later one whose URL did
        not match the slug pattern.
        """
        poetry_category_list = response.xpath(
            "//div[@id='main_right']/div[@class='card hc_other']/ul/li").extract()

        for category_item in poetry_category_list:
            selector = Selector(text=category_item)
            category_info = MoosePoetryCategoryInfoItem()

            # Chinese display name of the category.
            category_info['category_type_cn_name'] = selector.xpath(
                "//li/a/text()").extract_first()

            category_detail_url = selector.xpath("//li/a/@href").extract_first()
            if category_detail_url is None:
                # No link to follow -> nothing useful to yield for this entry.
                continue

            # Derive the pinyin (en) name from the URL slug, if present.
            en_name_result = self._EN_NAME_RE.findall(category_detail_url)
            if len(en_name_result) == 1:
                category_info['category_type_en_name'] = en_name_result[0]

            category_info['category_detail_url'] = category_detail_url
            # The item is per-iteration, so no deepcopy is needed in meta.
            yield scrapy.Request(f"{self.prefix_url}{category_detail_url}",
                                 callback=self.parse_detail,
                                 meta={'category_info': category_info},
                                 dont_filter=True)

    def parse_detail(self, response):
        """Parse a category detail page and yield the item with its poem count."""
        category_info = response.meta['category_info']
        category_total_str = response.xpath(
            "//div[@id='main_left']/div[@class='card']/h1/text()").extract_first()
        if category_total_str is not None:
            # Keep only the digits from the header text (e.g. "...(共1234首)").
            digits = "".join(re.findall(r'\d', category_total_str))
            if digits:  # guard: int("") would raise ValueError on a digit-free header
                category_info["category_total"] = int(digits)
        yield category_info
