import re
from datetime import timedelta
from urllib.parse import urljoin

import scrapy
from scrapy import Request
from scrapy.utils.project import get_project_settings

from utils.data_process import DataProcess
from wuliu_news.api.dicts.biaozhun_dict import biaozhun_catid_dict
from wuliu_news.items import WuliuBiaozhunNewsItem


class WlbznewsSpider(scrapy.Spider):
    """Spider for logistics-standards (物流标准) news on wlbz.chinawuliu.com.cn.

    Crawls several listing sections, follows each article link, and yields
    the value mapping of a ``WuliuBiaozhunNewsItem`` per fresh article.
    """
    name = 'wlbzNews'
    # BUG FIX: allowed_domains entries must be bare domain names — a trailing
    # slash never matches, so OffsiteMiddleware would reject followed requests.
    allowed_domains = ['chinawuliu.com.cn']
    start_urls = ['http://wlbz.chinawuliu.com.cn/gzdt/']

    moduleid = 7
    # Listing URL -> news-type label. The labels are runtime keys into
    # biaozhun_catid_dict, so they must stay exactly as-is.
    newsTypes = {
        'http://wlbz.chinawuliu.com.cn/gzdt/': "最新资讯",      # work updates
        'http://wlbz.chinawuliu.com.cn/bzgg/': "标准要闻",      # standard announcements
        'http://wlbz.chinawuliu.com.cn/wlbzzqyj/': '最新资讯',  # drafts soliciting comments
        'http://wlbz.chinawuliu.com.cn/bzxmjh/': '标准要闻',    # standard project plans
    }

    settings = get_project_settings()
    FROM_DOMAIN = "http://wlbz.chinawuliu.com.cn"        # crawler.settings.get("SOURCE_DOMAIN")
    # Oldest publication date (inclusive) still worth crawling.
    # NOTE(review): assumes TODAY is a date/datetime object in settings — confirm.
    limit_day = settings.get("TODAY") - timedelta(days=settings.get("CRAWL_CYCLE", default=1) - 1)

    def start_requests(self):
        """Issue one listing request per configured section (page 1)."""
        headers = {
            "Referer": "http://wlbz.chinawuliu.com.cn/"
        }
        for start_url, news_type in self.newsTypes.items():
            yield Request(url=start_url, headers=headers, callback=self.parse_list, dont_filter=True,
                          meta={'start_url': start_url, 'news_type': news_type, 'page': 1})

    def parse(self, response, **kwargs):
        # Unused default callback: all parsing goes through parse_list/parse_detail.
        pass

    def parse_list(self, response, **kwargs):
        """Parse one listing page.

        Yields a detail request per article newer than ``limit_day``, then a
        request for the next listing page while within the reported page count.

        :param response: listing-page response; meta carries start_url,
            news_type and the current 1-based page number.
        """
        news_type = response.meta['news_type']
        cat_id = biaozhun_catid_dict[news_type]

        for cont_li in response.css("div.list_R>ul>li"):
            title = cont_li.css("a::text").get()
            # e.g. /bzxmjh/202112/07/565951.shtml
            href = cont_li.css("a::attr(href)").get()
            # e.g. （2021-12-07）-> strip the fullwidth/ASCII parentheses
            news_time = cont_li.css("li>span::text").get()

            if not href or not news_time:
                # Malformed list entry (missing link or date): skip it
                # instead of crashing on None (original raised AttributeError).
                continue
            news_time = news_time.lstrip("(（").rstrip("）)")

            # Listing is newest-first, so the first stale date ends this
            # section entirely (no detail requests, no further pagination).
            if news_time < str(self.limit_day):
                return

            bznewsItem = WuliuBiaozhunNewsItem(cat_id=cat_id)
            bznewsItem.title = title
            bznewsItem.addtime = news_time

            detail_url = urljoin(self.FROM_DOMAIN, href)
            headers = {
                "Referer": response.url
            }
            yield Request(url=detail_url, headers=headers, callback=self.parse_detail, dont_filter=True,
                          meta={"item": bznewsItem})

        start_url = response.meta['start_url']
        next_page = response.meta['page'] + 1
        next_url = urljoin(start_url, f"index_{next_page}.shtml")

        # Banner looks like "共 3 页  56 条数据" -> total page count 3.
        pagecount = response.css(".pagercount::text").get(default="")
        try:
            max_page = int(pagecount.lstrip("共").split("页")[0].strip())
        except ValueError:
            # Banner missing or reformatted: fall back to a small fixed depth.
            max_page = 3

        if next_page > max_page:
            return

        headers = {
            "Referer": response.url
        }
        yield Request(url=next_url, headers=headers, callback=self.parse_list, dont_filter=True,
                      meta={'start_url': start_url, 'news_type': news_type, 'page': next_page})

    def parse_detail(self, response, **kwargs):
        """Parse an article page: fill in content (and title, when present)
        on the item carried in meta, then yield its value mapping.
        """
        bznewsItem: WuliuBiaozhunNewsItem = response.meta['item']
        title = response.css("div.box_con>h2::text").get()
        if title:
            # Detail-page title is more reliable than the (possibly
            # truncated) listing title.
            bznewsItem.title = title
        content = response.css("div.box_con>div").get()
        # Absolutize relative URLs, then strip fixed pixel sizes from images.
        content = DataProcess.complete_url(content, with_domain=self.FROM_DOMAIN)
        content = DataProcess.rm_pic_pixs(content)
        bznewsItem.content = content

        bznewsItem.set_defaults(skip_time=True)
        yield bznewsItem.values


if __name__ == '__main__':
    # Ad-hoc sanity check for the page-count parsing used in parse_list:
    # "共 3 页  56 条数据" -> 3
    sample = "共 3 页  56 条数据"
    total_pages = sample.lstrip("共").split("页")[0].strip()
    print(int(total_pages))