# -*- coding: utf-8 -*-
import scrapy
from queue import Queue
import json

class MarkerdownBlogSpider(scrapy.Spider):
    """Crawl posts from a CSDN blog and fetch each post's markdown source.

    Flow: seed a queue of article-list page URLs, drain it one page at a
    time in ``parse`` (so list pages are fetched sequentially), and for
    every article id found request the mp.csdn.net editor API, which
    returns the post (including its markdown content) as JSON.
    """

    name = 'blog'
    allowed_domains = ['blog.csdn.net', 'mp.csdn.net']
    start_urls = ['https://blog.csdn.net/qq_33430083/article/list/1']

    # Template for the paginated article-list pages.
    next_page_url_format = 'https://blog.csdn.net/qq_33430083/article/list/{page_num}'
    # Pending list-page URLs; parse() takes the next one only after the
    # current page has been processed.
    next_page_url_queue = Queue(100)
    # Editor endpoint that returns one post as JSON, given its article id.
    blog_info_url_format = 'https://mp.csdn.net/mdeditor/getArticle?id={blog_id}'
    # Session cookies (must be filled in for the editor API to authorize).
    cookies = {}
    blog_headers = {
        'Referer': None,
        'Host': 'mp.csdn.net'
    }

    def start_requests(self):
        # Seed the queue with every list-page URL, then kick off the
        # first request; parse() pulls subsequent pages from the queue.
        for page_num in range(1, 3):
            self.next_page_url_queue.put(
                self.next_page_url_format.format(page_num=page_num))

        yield scrapy.Request(self.next_page_url_queue.get(),
                             callback=self.parse, cookies=self.cookies)

    def parse(self, response):
        """Extract article ids from one list page and request each post."""
        blog_ids = response.xpath(
            '//div[@class="article-list"]/div[@data-articleid]/@data-articleid'
        ).extract()

        for blog_id in blog_ids:
            yield scrapy.Request(
                self.blog_info_url_format.format(blog_id=blog_id),
                callback=self.crawl_blog_info,
                cookies=self.cookies, headers=self.blog_headers)

        # Follow the next list page; an empty queue means we are done.
        if self.next_page_url_queue.empty():
            return
        yield scrapy.Request(self.next_page_url_queue.get(),
                             callback=self.parse, cookies=self.cookies)

    def crawl_blog_info(self, response):
        """Parse one post's JSON payload and yield it as an item.

        Bug fix: the original extracted every field into local variables
        and then discarded them, so no item ever reached the pipeline.
        ``.get()`` is used so a missing key yields ``None`` instead of
        raising ``KeyError`` and dropping the whole post.
        """
        blog_info = json.loads(response.text)
        yield {
            'id': blog_info.get('id'),
            'title': blog_info.get('title'),
            'description': blog_info.get('description'),
            'content': blog_info.get('content'),
            'markdowncontent': blog_info.get('markdowncontent'),
            'tags': blog_info.get('tags'),
            'categories': blog_info.get('categories'),
        }