import scrapy


class CsdnBlogSpider(scrapy.Spider):
    """Spider that crawls CSDN search-result pages for a keyword and
    scrapes each linked blog article.

    Flow:
        start_requests() -> listing pages on so.csdn.net (p=1..3)
        parse()          -> follows every article link on a listing page
        parse1()         -> yields {'title': str | None, 'data': bytes}
    """

    name = 'csdn_blog'
    # Must include so.csdn.net as well: the search/listing requests go to
    # so.csdn.net, and OffsiteMiddleware would otherwise filter them out,
    # leaving the spider with nothing to crawl.
    allowed_domains = ['blog.csdn.net', 'so.csdn.net']

    # Search keyword used to build the listing-page URLs.
    kw = 'python_'

    # First-level (listing) pages.
    def start_requests(self):
        """Yield requests for the first three search-result pages (p=1..3)."""
        for pn in range(1, 4):
            # Only p (page number) and q (query) vary; the remaining
            # query parameters are intentionally left empty.
            url = (
                'https://so.csdn.net/so/search/s.do'
                f'?p={pn}&q={self.kw}&t=&viparticle=&domain=&o=&s=&u=&l=&f='
            )
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Extract article links from a listing page and follow each one."""
        for href in response.xpath('//div[@class="limit_width"]/a/@href').extract():
            yield scrapy.Request(url=href, callback=self.parse1)

    def parse1(self, response):
        """Scrape one article page: title text plus the raw page body."""
        yield {
            'title': response.xpath('//h1[@class="title-article"]/text()').extract_first(),
            'data': response.body,  # raw bytes of the full HTML page
        }

