from datetime import timedelta
from urllib.parse import urljoin

import scrapy
from scrapy import Request
from scrapy.utils.project import get_project_settings

from utils.data_process import DataProcess
from wuliu_news.api.dicts.qyalfx_dict import qyalfx_catid_dict
from wuliu_news.items import WuliuQyalfxItem


class QyalfxSpider(scrapy.Spider):
    """Spider for "企业案例分享" (enterprise case sharing) articles on chinawuliu.com.cn.

    Walks the list pages configured in ``newsTypes``, follows each article
    link, and yields populated ``WuliuQyalfxItem`` value dicts from
    :meth:`parse_detail`.
    """
    name = 'qyalfx'
    allowed_domains = ['chinawuliu.com.cn']
    start_urls = ['http://chinawuliu.com.cn/']

    moduleid = 55
    # Maps each category list-page URL to its display name; the name is
    # resolved to a cat_id through qyalfx_catid_dict in parse_list.
    newsTypes = {
        'http://www.chinawuliu.com.cn/xsyj/qyal/': "选择分类",
    }

    settings = get_project_settings()
    FROM_DOMAIN = settings.get("SOURCE_DOMAIN")
    # Oldest publication date (inclusive) still crawled; the first article
    # older than this ends crawling for its category.
    limit_day = settings.get("TODAY") - timedelta(days=settings.get("CRAWL_CYCLE", default=1) - 1)

    def start_requests(self):
        """Issue one list-page request per configured category."""
        for start_url, news_type in self.newsTypes.items():
            headers = {
                "Referer": "http://www.chinawuliu.com.cn/xsyj/"
            }
            yield Request(url=start_url, headers=headers, callback=self.parse_list, dont_filter=True,
                          meta={'start_url': start_url, 'news_type': news_type, 'page': 1}
                          )

    def parse(self, response, **kwargs):
        """Unused default callback; all parsing goes through parse_list/parse_detail."""
        pass

    def parse_list(self, response, **kwargs):
        """Parse one list page: yield detail requests, then the next list page.

        :param response: list-page response; ``meta`` carries
            ``start_url``, ``news_type`` and the current ``page`` number
        :param kwargs: unused (scrapy callback signature)
        :return: generator of detail-page and next-list-page ``Request``s
        """
        news_type = response.meta['news_type']
        cat_id = qyalfx_catid_dict[news_type]

        cont_lis = response.css("div.leftRow ul.list-box>li")
        for cont_li in cont_lis:
            news_time = cont_li.css("span.time::text").get()
            if news_time is None:
                # Fix: skip malformed <li> entries without a date instead of
                # crashing on None.replace().
                continue
            # Site prints dates as "YYYY/MM/DD"; normalize to ISO "YYYY-MM-DD"
            # so a plain string comparison matches chronological order.
            news_time = news_time.replace("/", "-")
            # Listing appears newest-first, so the first stale article ends
            # this category entirely (no further items, no next page).
            if news_time < str(self.limit_day):
                return
            qyalfxItem = WuliuQyalfxItem(cat_id=cat_id)
            qyalfxItem.title = cont_li.css("li>a::attr(title)").get()
            qyalfxItem.addtime = news_time

            href = cont_li.css("li>a::attr(href)").get()
            detail_url = urljoin(self.FROM_DOMAIN, href)
            qyalfxItem.fromurl = detail_url

            headers = {
                "Referer": response.url
            }
            yield Request(url=detail_url, headers=headers, callback=self.parse_detail, dont_filter=True,
                          meta={'item': qyalfxItem}
                          )

        # Collect the numeric pagination links to find the last page.
        page_nums = [int(p) for p in
                     response.css('.pagination>li>a[href*="index"]::text').getall() if p.strip()]
        next_page = response.meta['page'] + 1
        # Fix: max() on an empty sequence raised ValueError when a page had
        # no pagination links; treat that case as "no next page". The guard
        # is also hoisted above the next_url construction.
        if not page_nums or next_page > max(page_nums):
            return

        start_url = response.meta['start_url']
        next_url = urljoin(start_url, f"index_{next_page}.shtml")
        headers = {
            "Referer": response.url
        }
        yield Request(url=next_url, headers=headers, callback=self.parse_list, dont_filter=True,
                      meta={'start_url': start_url, 'news_type': news_type, 'page': next_page})

    def parse_detail(self, response, **kwargs):
        """Parse an article page and yield the finished item's values.

        :param response: detail-page response; ``meta['item']`` holds the
            partially filled ``WuliuQyalfxItem`` from :meth:`parse_list`
        :param kwargs: unused (scrapy callback signature)
        :return: generator yielding the item's ``values``
        """
        qyalfxItem: WuliuQyalfxItem = response.meta['item']
        copyfrom = [t.strip() for t in response.css(".ul-title>p.new-time::text").getall() if t.strip()]
        if copyfrom:
            qyalfxItem.copyfrom = copyfrom
        content = response.css("div.leftRow>div.text").get()
        # Rewrite relative URLs to absolute ones and strip fixed pixel sizes
        # from images (see DataProcess helpers).
        content = DataProcess.complete_url(content, with_domain=self.FROM_DOMAIN)
        content = DataProcess.rm_pic_pixs(content)
        qyalfxItem.content = content

        qyalfxItem.set_defaults(skip_time=True)
        values = qyalfxItem.values
        yield values

if __name__ == '__main__':
    # Ad-hoc check: verify that the pagination CSS selector extracts the
    # highest page number from a sample pagination <ul> snippet.
    from scrapy import Selector
    sample_html = """<ul class="pagination">
              
              
                <li class="page-item"><span class="page-link"><i class="iconfont icon11"></i></span></li>
              
              
              
                <li class="page-item active"><a class="page-link" href="index.shtml" title="1">1</a></li>
                
                  <li class="page-item "><a class="page-link" href="index_2.shtml" title="2">2</a></li>
                
                
                
                
                  <li class="page-item "><a class="page-link" href="index_3.shtml" title="3">3</a></li>
                
                  <li class="page-item "><a class="page-link" href="index_4.shtml" title="4">4</a></li>
                
                  <li class="page-item "><a class="page-link" href="index_5.shtml" title="5">5</a></li>
                
                  <li class="page-item "><a class="page-link" href="index_6.shtml" title="6">6</a></li>
                
                  <li class="page-item "><a class="page-link" href="index_7.shtml" title="7">7</a></li>
                
                
                
                  <li class="page-item"><span class="page-link ellipsis">...</span></li>
                
                <li class="page-item "><a class="page-link" href="http://www.chinawuliu.com.cn/zcms/ui/catalog/15167/pc/index_50.shtml" title="50">50</a></li>
              
              
                <li class="page-item"><a class="page-link" href="index_2.shtml" title="下一页"><i class="iconfont icon12"></i></a></li>
              
              
            </ul>"""
    selector = Selector(text=sample_html)
    page_numbers = (
        int(txt)
        for txt in selector.css('.pagination>li>a[href*="index"]::text').getall()
        if txt.strip()
    )
    print(max(page_numbers))