# -*- coding: utf-8 -*-
import scrapy
import scrapy_gif.settings as settings
import re
from scrapy.http import Request
from scrapy.loader import ItemLoader
from scrapy_gif.items import ScrapyListItem
from scrapy_gif.utils.common import get_article_id, str_data_to_num, get_cur_data, get_re_num


class GifListSpider(scrapy.Spider):
    """Crawl the GIF site's category list pages and yield one item per article.

    Pipeline:
        parse      -> read pagination, schedule every list page of a category.
        parse_list -> scrape per-article metadata, follow the detail link.
        parse_item -> merge list metadata with detail-page tags into the item.
    """

    name = 'gif_spider_list'
    # allowed_domains must be bare host names, so strip the scheme.
    allowed_domains = [settings.GIF_HOST.replace('https://', '')]
    # One start URL per configured category path.
    start_urls = [
        settings.GIF_HOST + url for url in settings.TYPE_URL_DIC.keys()
    ]

    def parse(self, response):
        """
        1. Read the total page count from the pagination widget.
        2. Resolve the category display name for this start URL.
        3. Schedule a request for every list page.
        """
        # The last pagination <span> holds the page total. Default to a
        # single page when the widget is absent or unparsable (the original
        # indexed [-1] unconditionally and raised IndexError on pages
        # without a pagination block).
        pages = 1
        page_texts = response.css('.pagination ul li span::text').extract()
        if page_texts:
            match_re = re.match(r'.*?(\d+).*', page_texts[-1])
            if match_re:
                pages = int(match_re.group(1))

        # Map the URL path fragment back to its display name; stop at the
        # first match instead of letting later entries overwrite it, and use
        # the value already yielded by items() instead of a second lookup.
        type_name = ''
        for url_part, display_name in settings.TYPE_URL_DIC.items():
            if url_part in response.url:
                type_name = display_name
                break

        for index in range(1, pages + 1):
            yield Request(url='{url}/page/{index}/'.format(url=response.url, index=index),
                          headers=response.request.headers,
                          cookies=response.request.cookies,
                          meta={'type_name': type_name},
                          dont_filter=True,
                          callback=self.parse_list)

    def parse_list(self, response):
        """Scrape metadata for every article on a list page and follow each detail link."""
        # Read type_name ONCE into its own variable. The original rebound
        # `meta_dic` inside the loop, so from the second article on it read
        # type_name out of the PREVIOUS article's meta dict, where the value
        # was already a list, producing [['name']] instead of ['name'].
        type_name = response.meta.get('type_name', '')
        for item in response.css('.content article'):
            post_url = item.css('header h2 a::attr(href)').extract_first('')
            if not post_url:
                # A Request with an empty URL raises ValueError; skip
                # malformed entries instead of killing the whole page.
                continue
            # Lazily-loaded images carry the real URL in data-src; fall back
            # to the plain src attribute otherwise.
            article_img = item.css('a img::attr(data-src)').extract()
            if not article_img:
                article_img = item.css('a img::attr(src)').extract()
            # First .pv span is the read count, the last is the comment
            # count; guard the [-1] index against an empty selector list.
            pv_selectors = item.css('p span.pv::text')
            comments_text = pv_selectors[-1].extract() if pv_selectors else ''
            article_meta = {
                'article_id': get_article_id(item.css('a.focus::attr(href)').extract_first('0')),
                'type_name': [type_name],
                'article_img': article_img,
                'article_title': item.css('header h2 a::text').extract(),
                'article_detailsSrc': item.css('header h2 a::attr(href)').extract(),
                'article_imgCount': item.css('header small::text').extract(),
                'article_sendTime': str_data_to_num(item.css('p time::text').extract_first('')),
                'article_author': item.css('p span.author::text').extract(),
                'article_readCount': get_re_num(item.css('p span.pv::text').extract_first('')),
                'article_commentsCount': get_re_num(comments_text),
                'article_likeCount': get_re_num(item.css('p span.pc::text').extract_first('')),
                'article_note': item.css('p.note::text').extract(),
                'createTime': [get_cur_data()]
            }
            yield Request(url=post_url,
                          headers=response.request.headers,
                          cookies=response.request.cookies,
                          dont_filter=True,
                          meta=article_meta,
                          callback=self.parse_item)

    def parse_item(self, response):
        """Load list-page metadata plus detail-page tags into a ScrapyListItem."""
        meta_dic = response.meta
        item_loader = ItemLoader(item=ScrapyListItem(), response=response)

        # Every field except article_tag was collected on the list page and
        # travels here via Request.meta; article_tag comes from this page.
        item_loader.add_value('article_id', meta_dic.get('article_id', [0]))
        item_loader.add_value('type_name', meta_dic.get('type_name', ['']))
        item_loader.add_value('article_img', meta_dic.get('article_img', ['']))
        item_loader.add_value('article_title', meta_dic.get('article_title', ['']))
        item_loader.add_value('article_detailsSrc', meta_dic.get('article_detailsSrc', ['']))
        item_loader.add_value('article_imgCount', meta_dic.get('article_imgCount', [0]))
        item_loader.add_value('article_sendTime', meta_dic.get('article_sendTime', [0]))
        item_loader.add_value('article_author', meta_dic.get('article_author', ['']))
        item_loader.add_value('article_readCount', meta_dic.get('article_readCount', [0]))
        item_loader.add_value('article_commentsCount', meta_dic.get('article_commentsCount', [0]))
        item_loader.add_value('article_likeCount', meta_dic.get('article_likeCount', [0]))
        item_loader.add_value('article_note', meta_dic.get('article_note', ['']))
        item_loader.add_css('article_tag', '.article-tags a::text')
        item_loader.add_value('createTime', meta_dic.get('createTime', [0]))

        article_item = item_loader.load_item()
        yield article_item
