from datetime import timedelta
from urllib.parse import urljoin

import scrapy
from scrapy import Request
from scrapy.utils.project import get_project_settings

from utils.data_process import DataProcess
from wuliu_news.api.dicts.news_dict import news_catid_dict
from wuliu_news.items import WuliuNewsItem


class ZcfgnewsSpider(scrapy.Spider):
    """Spider for the "政策法规" (policies & regulations) section of chinawuliu.com.cn.

    Crawls each category listing page in ``newsTypes``, follows pagination
    until either the article dates fall outside the configured crawl window
    (``limit_day``) or the last advertised page is reached, and scrapes each
    article's detail page into a ``WuliuNewsItem``.
    """
    name = 'zcfgNews'
    allowed_domains = ['chinawuliu.com.cn']
    start_urls = ['http://chinawuliu.com.cn/']

    # Module id of this news section in the target site's CMS.
    moduleid = 21
    # Listing-page URL -> news-type label; the label is the lookup key
    # into news_catid_dict, so these strings must not be altered.
    newsTypes = {
        'http://www.chinawuliu.com.cn/zcfg/gwy/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/fgw/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/jtb/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/gxb/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/gab/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/swb/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/czb/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/swzj/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/hgzs/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/yzj/': "法律法规",
        'http://www.chinawuliu.com.cn/zcfg/dfjqt/': "法律法规",
    }

    settings = get_project_settings()
    FROM_DOMAIN = settings.get("SOURCE_DOMAIN")
    # Oldest article date (inclusive) still worth crawling:
    # TODAY minus (CRAWL_CYCLE - 1) days. Assumes TODAY is a date-like
    # object supporting timedelta arithmetic — set by project settings.
    limit_day = settings.get("TODAY") - timedelta(days=settings.get("CRAWL_CYCLE", default=1) - 1)

    def start_requests(self):
        """Seed one listing request per category URL in ``newsTypes``."""
        # Loop-invariant header, built once instead of per iteration.
        headers = {
            "Referer": 'http://www.chinawuliu.com.cn/zcfg/'
        }
        for start_url, news_type in self.newsTypes.items():
            yield Request(url=start_url, headers=headers, callback=self.parse_list, dont_filter=True,
                          meta={'start_url': start_url, 'news_type': news_type, 'page': 1})

    def parse(self, response, **kwargs):
        """Unused default callback; all parsing goes through parse_list."""
        pass

    def parse_list(self, response, **kwargs):
        """Parse one listing page: yield a detail-page request per fresh
        article, then follow pagination to the next listing page.

        :param response: listing-page response; meta carries
            ``start_url``, ``news_type`` and the 1-based ``page`` number.
        :return: iterator of detail and pagination Requests.
        """
        news_type = response.meta['news_type']
        cat_id = news_catid_dict[news_type]

        cont_lis = response.css("div.leftRow ul.list-box>li")
        for cont_li in cont_lis:
            news_time = cont_li.css("span.time::text").get()
            if not news_time:
                # Defensive: .get() returns None for malformed <li> entries;
                # previously this crashed on .replace() below.
                continue
            news_time = news_time.replace("/", "-")
            # Dates are ISO-like ("YYYY-MM-DD") after the replace, so string
            # comparison orders chronologically. Listings are assumed
            # newest-first: the first stale entry means the remainder of this
            # page and all later pages are stale too — stop the whole branch.
            if news_time < str(self.limit_day):
                return
            newsItem = WuliuNewsItem(cat_id=cat_id)
            newsItem.title = cont_li.css("li>a::attr(title)").get()
            newsItem.addtime = news_time

            href = cont_li.css("li>a::attr(href)").get()
            detail_url = urljoin(self.FROM_DOMAIN, href)
            newsItem.fromurl = detail_url

            headers = {
                "Referer": response.url
            }
            yield Request(url=detail_url, headers=headers, callback=self.parse_detail, dont_filter=True,
                          meta={'item': newsItem}
                          )

        start_url = response.meta['start_url']
        next_page = response.meta['page'] + 1
        next_url = urljoin(start_url, f"index_{next_page}.shtml")

        # Highest page number advertised by the pagination widget.
        # BUG FIX: the previous max([...]) raised ValueError on pages with
        # no pagination links; default=0 makes such pages end the crawl.
        # .isdigit() also skips non-numeric link text (e.g. "next"-style
        # labels whose href happens to contain "index"), which used to
        # crash int().
        max_page = max(
            (int(p) for p in response.css('.pagination>li>a[href*="index"]::text').getall()
             if p.strip().isdigit()),
            default=0,
        )
        if next_page > max_page:
            return

        headers = {
            "Referer": response.url
        }
        yield Request(url=next_url, headers=headers, callback=self.parse_list, dont_filter=True,
                      meta={'start_url': start_url, 'news_type': news_type, 'page': next_page})

    def parse_detail(self, response, **kwargs):
        """Parse one article detail page and yield the finished item values.

        :param response: detail-page response; meta carries the partially
            filled ``WuliuNewsItem`` built in :meth:`parse_list`.
        :return: iterator yielding the item's ``values``.
        """
        newsItem: WuliuNewsItem = response.meta['item']
        # Source attribution line(s); only set when actually present so the
        # item's default (via set_defaults) is not clobbered with [].
        copyfrom = [t.strip() for t in response.css(".ul-title>p.new-time::text").getall() if t.strip()]
        if copyfrom:
            newsItem.copyfrom = copyfrom
        content = response.css("div.leftRow>div.text").get()
        # Rewrite relative resource URLs to absolute and strip fixed pixel
        # sizes from images before storing the HTML body.
        content = DataProcess.complete_url(content, with_domain=self.FROM_DOMAIN)
        content = DataProcess.rm_pic_pixs(content)
        newsItem.content = content

        newsItem.set_defaults(skip_time=True)
        values = newsItem.values
        yield values