# -*- coding: utf-8 -*-
import scrapy
import lxml.etree as le
import urllib.request as ur

class CsdnTestSpider(scrapy.Spider):
    """Spider that searches CSDN blogs for a keyword and yields one item
    per article, containing the article title and the raw page body.

    The spider only extracts data and yields items; persisting them is
    delegated to an item pipeline.
    """
    name = 'csdn_test'
    allowed_domains = ['blog.csdn.net']
    # Search term used to build the CSDN blog-search URLs.
    keyword = 'python'

    def start_requests(self):
        """Yield requests for search-result pages 1..10 of ``keyword``."""
        for pn in range(1, 11):
            url = ('https://so.csdn.net/so/search/s.do'
                   '?p=%s&q=%s&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0'
                   % (pn, self.keyword))
            yield scrapy.Request(
                url=url,
                callback=self.parse,
            )

    def parse(self, response):
        """Extract article links from a search-result page and follow them.

        Bug fix: ``response`` is a ``scrapy.http.Response``, not an HTML
        string — feeding it to ``lxml.etree.HTML`` would fail, and lxml
        results have no ``.extract()``. Use the response's built-in
        selector, whose ``.xpath(...).extract()`` returns a list of str.
        """
        href_s = response.xpath(
            "//span[@class='down fr']/../span[@class='link']/a/@href"
        ).extract()
        for href in href_s:
            # Scheduling this request fetches the article page; the
            # downloaded response is then handed to parse2.
            yield scrapy.Request(
                url=href,
                callback=self.parse2,
            )

    def parse2(self, response):
        """Yield an item with the article title and raw page bytes.

        Bug fixes: use the response's own selector (see ``parse``), and
        ``response.body`` is an attribute holding bytes, not a method —
        calling it raised ``TypeError``.
        """
        item = dict(
            title=response.xpath(
                '//h1[@class="title-article"]/text()'
            ).extract_first(),
            data=response.body,
        )
        yield item