import os
import re

import scrapy


class CsdnBlogSpider(scrapy.Spider):
    """Search CSDN blogs for a keyword and save each matching article's
    HTML to a local file under ``blog/``.

    The spider is interactive: it prompts on stdin for the search keyword
    and the (inclusive) page range to crawl, so it is meant to be launched
    from a terminal.
    """
    name = 'csdn_blog'
    allowed_domains = ['blog.csdn.net']

    def start_requests(self):
        """Prompt for keyword and page range, then request each page of
        CSDN's search API.

        Yields:
            scrapy.Request: one request per search-result page, handled
            by :meth:`parse_page1`.
        """
        keyword = input('关键词:')
        pn_start = int(input('起始页:'))
        pn_end = int(input('终止页:'))
        # Walk every page from the start page to the end page, inclusive.
        for page in range(pn_start, pn_end + 1):
            # Hand the search-API request to the scheduler; the JSON
            # response is parsed in parse_page1.
            yield scrapy.Request(
                url='https://so.csdn.net/api/v2/search?q={keyword}&t=blog&p={page}&s=0&tm=0&lv=-1&ft=0&l=&u=&platform=pc'.format(
                    keyword=keyword, page=page),
                callback=self.parse_page1,
            )

    def parse_page1(self, response):
        """Extract every article URL from the search-API response and
        request each article page.

        Yields:
            scrapy.Request: one request per article, handled by
            :meth:`parse_page2`.
        """
        # NOTE(review): this scrapes URLs out of a JSON payload with a
        # regex; it assumes the API does not \/-escape slashes — confirm
        # against the live response, otherwise parse with json.loads.
        article_urls = re.findall('"url":"(.*?)"', response.body.decode('utf-8'))
        for article_url in article_urls:
            yield scrapy.Request(
                url=article_url,
                callback=self.parse_page2,
            )

    def parse_page2(self, response):
        """Save the article's raw HTML as ``blog/<title>.html``.

        Articles whose page lacks a ``<h1 class="title-article">`` node
        (e.g. removed posts or redirect pages) are skipped with a warning
        instead of crashing the spider.
        """
        # extract_first() returns None instead of raising IndexError when
        # the xpath matches nothing — the original extract()[0] crashed here.
        title = response.xpath('//h1[@class="title-article"]/text()').extract_first()
        if not title:
            self.logger.warning('No article title found, skipping %s', response.url)
            return
        # File names must not contain these special characters; drop them.
        title = re.sub(
            r'[/\\:*"<>|?]', '', title
        )
        # Make sure the target directory exists; the original open() failed
        # with FileNotFoundError when blog/ was missing.
        os.makedirs('blog', exist_ok=True)
        filepath = 'blog/%s.html' % title
        # Write the raw response bytes so the saved file matches the page.
        with open(filepath, 'wb') as f:
            f.write(response.body)
