# -*- coding: utf-8 -*-
import time

import scrapy


class S02Spider(scrapy.Spider):
    """Search CSDN blogs for a keyword and download each result article.

    Run with spider arguments (preferred)::

        scrapy crawl s02 -a kw=python -a start_pn=1 -a end_pn=3

    If any argument is omitted, the spider falls back to prompting on
    stdin, matching the original interactive behavior.
    """
    name = 's02'

    # Throttle politely between requests. The original code called
    # time.sleep(2) inside parse(), which blocks the Twisted reactor and
    # stalls every in-flight request; DOWNLOAD_DELAY achieves the same
    # 2-second pacing without blocking.
    custom_settings = {'DOWNLOAD_DELAY': 2}

    def __init__(self, kw=None, start_pn=None, end_pn=None, *args, **kwargs):
        """Collect search parameters.

        :param kw: search keyword (``-a kw=...``); prompted for if None.
        :param start_pn: first result page number, inclusive.
        :param end_pn: last result page number, inclusive.

        Prompting happens here (at crawl time) rather than in the class
        body, so merely importing this module — e.g. via ``scrapy list``
        or when crawling a different spider — no longer blocks on stdin.
        """
        super().__init__(*args, **kwargs)
        self.kw = kw if kw is not None else input('请输入关键字:')
        self.start_pn = int(start_pn) if start_pn is not None else int(input('start_page:'))
        self.end_pn = int(end_pn) if end_pn is not None else int(input('end_page:'))

    def start_requests(self):
        """Yield one search-results request per page in [start_pn, end_pn]."""
        for pn in range(self.start_pn, self.end_pn + 1):
            yield scrapy.Request(
                url='https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0' % (pn, self.kw),
                callback=self.parse
            )

    def parse(self, response):
        """Extract article links from a search-results page and follow each."""
        href_s = response.xpath('//span[@class="down fr"]/../span[@class="link"]/a/@href').extract()
        print('*' * 60)
        print(href_s)
        for href in href_s:
            print('*' * 60)
            print(href)
            yield scrapy.Request(
                url=href,
                callback=self.parse2
            )

    def parse2(self, response):
        """Yield one item per article: its title text and raw page bytes."""
        item = dict(
            title=response.xpath('//h1[@class="title-article"]/text()').extract_first(),
            data=response.body
        )
        yield item