# -*- coding: utf-8 -*-
import scrapy
import scrapy_gif.settings as settings
import re
from scrapy.http import Request
from scrapy.loader import ItemLoader
from scrapy_gif.items import ScrapyDetailsItem
from scrapy_gif.utils.common import get_article_id, get_html_str, get_cur_data


class GifDetailsSpider(scrapy.Spider):
    """Spider that crawls GIF article listings and emits article details.

    Flow: ``parse`` (discover listing page count) -> ``parse_list`` (one
    listing page) -> ``parse_item`` (one article) -> ``parse_item_p``
    (assemble the item from request meta).
    """

    name = 'gif_spider_details'
    allowed_domains = [settings.GIF_HOST.replace('https://', '')]
    start_urls = [
        settings.GIF_HOST + url for url in settings.TYPE_URL_DIC.keys()
    ]

    def parse(self, response):
        """
        1. Read the total page count from the pagination widget.
        2. Emit one request per listing page, handled by ``parse_list``.
        """
        # The last <span> of the pagination block carries the page-count
        # text. Guard against a missing widget (single-page listing)
        # instead of letting extract()[-1] raise IndexError — the old
        # code could never reach its pages=1 fallback in that case.
        pages = 1
        page_spans = response.css('.pagination ul li span::text').extract()
        if page_spans:
            match_re = re.match(r'.*?(\d+).*', page_spans[-1])
            if match_re:
                pages = int(match_re.group(1))

        # rstrip('/') prevents a '//page/N/' double slash when
        # response.url already ends with a slash.
        base_url = response.url.rstrip('/')
        for index in range(1, pages + 1):
            yield Request(url='{url}/page/{index}/'.format(url=base_url, index=index),
                          headers=response.request.headers,
                          cookies=response.request.cookies,
                          dont_filter=True,
                          callback=self.parse_list)

    def parse_list(self, response):
        """Follow every article link found on one listing page."""
        list_item = response.css('.content article')
        for item in list_item:
            post_url = item.css('header h2 a::attr(href)').extract_first('')
            if not post_url:
                # Skip malformed entries; Request('') raises ValueError.
                continue
            yield Request(url=post_url,
                          headers=response.request.headers,
                          cookies=response.request.cookies,
                          dont_filter=True,
                          callback=self.parse_item)

    def _paragraph_requests(self, response):
        """Yield one meta-carrying request per paragraph of an article page.

        Shared by ``parse_item`` and ``parse_item_one`` (their bodies were
        duplicated verbatim). Each request targets GIF_HOST only as a
        vehicle to reach ``parse_item_p``, which builds the item from the
        data packed into ``meta`` here (original design, kept as-is).
        """
        for item in response.css('.content article p'):
            meta_dic = {
                'article_id': get_article_id(response.url),
                'article_img': item.css('img::attr(src)').extract(),
                'article_text': get_html_str(item.extract()),
                'createTime': [get_cur_data()]
            }
            yield Request(url=settings.GIF_HOST,
                          headers=response.request.headers,
                          cookies=response.request.cookies,
                          dont_filter=True,
                          meta=meta_dic,
                          callback=self.parse_item_p)

    def parse_item_one(self, response):
        """Like ``parse_item``, but also follows intra-article paging links."""
        for request in self._paragraph_requests(response):
            yield request

        # NOTE(review): every page of a multi-page article repeats the same
        # paging links, so with dont_filter=True these get re-requested; the
        # original comment suggests this should run only once — kept as-is
        # to preserve behavior, worth confirming with the site structure.
        list_post = response.css('.article-paging a::attr(href)').extract()
        for node_post in list_post:
            yield Request(url=node_post,
                          headers=response.request.headers,
                          cookies=response.request.cookies,
                          dont_filter=True,
                          callback=self.parse_item)

    def parse_item(self, response):
        """Yield one meta-carrying request per paragraph of the article."""
        for request in self._paragraph_requests(response):
            yield request

    def parse_item_p(self, response):
        """Assemble a ``ScrapyDetailsItem`` from the data in ``response.meta``."""
        meta_dic = response.meta
        item_loader = ItemLoader(item=ScrapyDetailsItem(), response=response)

        item_loader.add_value('article_id', meta_dic.get('article_id', [0]))
        item_loader.add_value('article_img', meta_dic.get('article_img', ['']))
        item_loader.add_value('article_text', meta_dic.get('article_text', ['']))
        item_loader.add_value('createTime', meta_dic.get('createTime', [0]))

        article_item = item_loader.load_item()
        yield article_item
