# -*- coding: utf-8 -*-
from urllib.parse import urljoin

import scrapy
from scrapy import Request


class CnblogsSpider(scrapy.Spider):
    """Crawl news.cnblogs.com: walk the paginated news listing and
    extract the fields of every news article."""

    name = 'cnblogs'
    allowed_domains = ['news.cnblogs.com']
    start_urls = ['http://news.cnblogs.com/']

    def parse(self, response):
        """
        Parse one listing page.

        1. Extract every article URL on the page and schedule it for
           download; ``parse_detail`` handles the downloaded article.
        2. Extract the next-page URL and schedule it with this method as
           the callback so the whole listing is walked page by page.

        :param response: the downloaded listing page
        :return: yields ``scrapy.Request`` objects
        """
        for post_node in response.css('#news_list .news_block'):
            post_url = post_node.css('h2 a::attr(href)').extract_first()
            # Guard: skip entries without an href instead of scheduling
            # a request for a bogus joined URL.
            if post_url:
                yield Request(urljoin(response.url, post_url),
                              callback=self.parse_detail)

        # Follow the pager only when the last <a> actually carries an href.
        # Extracting the href once and testing it replaces the original
        # two-step "extract text, compare to a sentinel, re-query" dance.
        # NOTE(review): on the final page the last pager link may be a page
        # number rather than "Next"; Scrapy's duplicate filter prevents a
        # re-crawl loop, but confirm the pager markup against the live site.
        next_url = response.css('div.pager a:last-child::attr(href)').extract_first()
        if next_url:
            yield Request(urljoin(response.url, next_url), callback=self.parse)

    def parse_detail(self, response):
        """
        Parse one article page and yield its fields as an item.

        All requests are issued asynchronously by the Scrapy engine; this
        callback only extracts data from the already-downloaded response.

        :param response: the downloaded article page
        :return: yields one dict per article
        """
        # Missing fields fall back to None (extract_first's default) so
        # downstream code can test them with simple truthiness, instead of
        # the original magic marker strings.
        yield {
            'url': response.url,
            'title': response.css('#news_title a::text').extract_first(),
            'author': response.css('#news_info span.news_poster a::text').extract_first(),
            'date': response.css('#news_info span.time::text').extract_first(),
            # The original selector '.title span#num J_view_num::text' was
            # malformed CSS: the space made "J_view_num" a descendant *tag*
            # name, which matches nothing. The view counter appears to live
            # in span#news_view_num — TODO confirm against live markup.
            'read_nums': response.css('#news_view_num::text').extract_first(),
            'fav_nums': response.css('#news_info > a:nth-child(6)::text').extract_first(),
            'content': response.css('#news_content').extract_first(),
            'tags': response.css('#news_more_info > div > a::text').extract_first(),
        }