import json
import os
import re

import scrapy



class BlogcjSpider(scrapy.Spider):
    """Search CSDN for ``keyword`` and save each matching blog post as a
    local HTML file under ``csdn_blog/<title>.html``.

    Crawl flow: ``start_requests`` hits the CSDN search API once per result
    page, ``parse`` extracts the blog URLs from the JSON payload, and
    ``parse1`` writes each fetched page to disk, named after its HTML title.
    """

    name = 'blogcj'
    # The two domains this spider touches: the search API and the blog host.
    allowed_domains = ['blog.csdn.net', 'so.csdn.net']
    keyword = 'python'
    # Inclusive range of search-result pages to fetch (was hard-coded 1..1
    # inside start_requests; exposed as attributes so it can be overridden,
    # e.g. via `scrapy crawl blogcj -a page_end=5`).
    page_start = 1
    page_end = 1

    # DOTALL lets '.' span newlines, replacing the old "strip all \n from
    # the whole document first" pre-pass.
    _TITLE_RE = re.compile(r"<title>(.*?)</title>", re.DOTALL)
    # Characters that are illegal in file names on common platforms.
    _BAD_FILENAME_CHARS = re.compile(r'[/\\:*"<>|?]')

    def start_requests(self):
        """Yield one search-API request per result page."""
        for page in range(int(self.page_start), int(self.page_end) + 1):
            url = ('https://so.csdn.net/api/v2/search'
                   '?q={keyword}&t=blog&p={page}&s=0&tm=0&lv=-1&ft=0'
                   '&l=&u=&platform=pc').format(keyword=self.keyword,
                                                page=page)
            yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        """Parse the search-API JSON and request each blog-post URL.

        Assumes the payload carries the hits under the ``result_vos`` key,
        each with a ``url`` field — TODO confirm against the live API.
        """
        results = json.loads(response.text).get('result_vos', [])
        for entry in results:
            yield scrapy.Request(entry['url'], callback=self.parse1)

    def parse1(self, response):
        """Write the fetched blog page to ``csdn_blog/<title>.html``.

        Skips (with a warning) pages that have no ``<title>`` tag instead
        of crashing with an IndexError, and creates the output directory
        on first use instead of assuming it exists.
        """
        match = self._TITLE_RE.search(response.text)
        if match is None:
            self.logger.warning('no <title> in %s; skipping', response.url)
            return
        # Drop newlines (the old code stripped them document-wide) and any
        # characters that are illegal in file names.
        title = self._BAD_FILENAME_CHARS.sub('',
                                             match.group(1).replace('\n', ''))
        os.makedirs('csdn_blog', exist_ok=True)
        self.logger.info('saving %s', title)
        with open('csdn_blog/%s.html' % title, 'wb') as f:
            f.write(response.body)
