# -*- coding: utf-8 -*-
from urllib import parse

import scrapy
from scrapy.http import Request

from ArticleSpider.items import JobBoleArticleItem, ArticleItemLoader
from ArticleSpider.utils.common import get_md5


class JobboleSpider(scrapy.Spider):
    """Spider for blog.jobbole.com: crawls article list pages, follows each
    article link to its detail page, and yields ``JobBoleArticleItem``s built
    with an item loader."""

    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    # start_urls = ['http://blog.jobbole.com/114536']
    start_urls = ['http://blog.jobbole.com/all-posts']

    custom_settings = {
        "COOKIES_ENABLED": True
    }

    def parse(self, response):
        """
        1. Extract every article URL on the list page and schedule it for
           download; ``parse_detail`` handles the downloaded detail page.
        2. Extract the next-page URL and schedule it back through ``parse``.

        :param response: list-page response
        :return: generator of scrapy ``Request`` objects
        """
        self.logger.info("Parsing list page: %s", response.url)

        # Every article anchor on the list page; the thumbnail <img> sits
        # inside the same <a> node.
        post_nodes = response.xpath("//div[@id='archive']//div[@class='post-thumb']/a")
        for post_node in post_nodes:
            post_url = post_node.xpath('@href').extract_first()
            image_url = post_node.xpath('img/@src').extract_first()

            # Defensive: skip anchors without an href (extract_first() -> None).
            if not post_url:
                continue

            # Some hrefs are relative (no scheme/domain).  urljoin resolves
            # them against response.url; absolute URLs pass through unchanged:
            #   urljoin('http://blog.jobbole.com/all-posts/',
            #           'http://blog.jobbole.com/114536/')
            #     -> 'http://blog.jobbole.com/114536/'
            valid_url = parse.urljoin(response.url, post_url)
            # Fix: the thumbnail src can be relative (or missing) too —
            # resolve it the same way instead of forwarding it raw.
            front_image_url = parse.urljoin(response.url, image_url) if image_url else ""
            # yield hands the request to the scrapy engine (generator-based).
            yield Request(url=valid_url, callback=self.parse_detail,
                          meta={'front_image_url': front_image_url})

        # Follow pagination.  Fix: resolve a possibly-relative next-page href
        # before yielding it.
        next_url = response.xpath("//a[@class='next page-numbers']/@href").extract_first()
        self.logger.info("next_url: %s", next_url)
        if next_url:
            yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)

    def parse_detail(self, response):
        """Extract the article fields from a detail page and yield the item.

        :param response: article detail-page response
        :return: generator yielding one ``JobBoleArticleItem``
        """
        # Thumbnail URL forwarded from the list page via Request.meta.
        front_image_url = response.meta.get("front_image_url", "")

        # Populate the item through the project's loader so its input/output
        # processors (defined on the item/loader classes) run uniformly.
        item_loader = ArticleItemLoader(item=JobBoleArticleItem(), response=response)
        item_loader.add_xpath("title", "//div[@class='entry-header']/h1/text()")
        item_loader.add_value("url", response.url)
        # Stable fixed-length key derived from the URL (used as primary key).
        item_loader.add_value("url_object_id", get_md5(response.url))
        item_loader.add_xpath("create_date", "//p[@class='entry-meta-hide-on-mobile']/text()")
        item_loader.add_value("front_image_url", [front_image_url])
        item_loader.add_xpath("praise_nums", "//span[contains(@class,'vote-post-up')]/h10/text()")
        item_loader.add_xpath("comment_nums", "//a[@href='#article-comment']/span/text()")
        item_loader.add_xpath("fav_nums", "//span[contains(@class,'bookmark-btn')]/text()")
        item_loader.add_xpath("tags", "//p[@class='entry-meta-hide-on-mobile']/a/text()")
        item_loader.add_xpath("content", "//div[@class='entry']")

        article_item = item_loader.load_item()
        yield article_item