# -*- coding: utf-8 -*-
from urllib.parse import quote_plus

import scrapy


class CsdnBlogSpider(scrapy.Spider):
    """Crawl CSDN blog-search results for ``keyword`` and yield, for each
    matching article, a dict with its title and raw HTML body.

    Flow: ``start_requests`` (search pages) -> ``parse`` (extract article
    links) -> ``parse2`` (extract title + body from each article page).
    """

    name = 'csdn_blog'
    allowed_domains = ['blog.csdn.net']
    # Search term. Previously declared but never used: the search URL
    # hard-coded "q=python+" (note the stray trailing "+", an encoded
    # space). It is now URL-encoded and interpolated into the query.
    keyword = 'python'
    # Number of search-result pages to request (pages 1..max_pages).
    max_pages = 10

    def start_requests(self):
        """Yield one search-results request per page for ``self.keyword``."""
        url_tpl = (
            'https://so.csdn.net/so/search/s.do'
            '?p={page}&q={kw}&t=blog&viparticle=&domain='
            '&o=&s=&u=nav/{kw}&l=&rbg=1'
        )
        kw = quote_plus(self.keyword)  # escape spaces/unicode safely
        for page in range(1, self.max_pages + 1):
            yield scrapy.Request(
                url=url_tpl.format(page=page, kw=kw),
                callback=self.parse,
            )

    def parse(self, response):
        """Extract article links from a search-results page and follow them.

        Only entries that carry a ``span.mr16`` marker (i.e. blog results)
        are followed; the first anchor in the entry's title row is the
        article URL.
        """
        hrefs = response.xpath(
            '//div[@class="search-list-con"]/dl'
            '//span[@class="mr16"]/../../dt/div/a[1]/@href'
        ).extract()
        for href in hrefs:
            yield scrapy.Request(
                url=href,
                callback=self.parse2,
            )

    def parse2(self, response):
        """Yield an item for one article page: its title and raw body.

        ``title`` is None if the page lacks the expected <h1>; ``data``
        is the raw response bytes (undecoded HTML).
        """
        title = response.xpath(
            '//h1[@class="title-article"]/text()'
        ).extract_first()
        yield dict(
            title=title,
            data=response.body,
        )
