# -*- coding: utf-8 -*-
import scrapy


class CsdnblogSpider(scrapy.Spider):
    """Crawl CSDN blog posts found via the CSDN site search for 'python'.

    Flow:
      1. ``start_requests`` — request search-result pages 1..2 on so.csdn.net.
      2. ``parse1``         — extract each hot-article link and follow it.
      3. ``parse2``         — yield the article title and raw page body.
    """

    name = 'csdnblog'
    # The search pages live on so.csdn.net while articles live on
    # blog.csdn.net; list both so OffsiteMiddleware does not drop
    # requests to either host when they are yielded from a callback.
    allowed_domains = ['blog.csdn.net', 'so.csdn.net']

    def start_requests(self):
        """Yield requests for search-result pages 1 and 2 (query 'python')."""
        for page in range(1, 3):
            url = (
                'https://so.csdn.net/so/search/s.do'
                '?p=%s&q=python&t=blog&domain=&o=&s=&u=&l=&f=&rbg=0' % page
            )
            yield scrapy.Request(
                url=url,
                callback=self.parse1
            )

    def parse1(self, response):
        """Parse one search-result page and follow every article link.

        The XPath anchors on the download-count span (class ``down fr``),
        steps up to its parent, then into the sibling ``link`` span to get
        the article URL. Note: ``response.body`` is a bytes attribute, not
        a method; selector extraction goes through ``response.xpath``.
        """
        href_s = response.xpath(
            "//span[@class='down fr']/../span[@class='link']/a/@href"
        ).extract()
        print('href_s==', href_s)
        for href in href_s:
            yield scrapy.Request(
                url=href,
                callback=self.parse2
            )

    def parse2(self, response):
        """Parse one article page: yield its title and raw HTML body.

        Fix: the item construction was commented out, so the spider crawled
        article pages but produced no items at all; restored here.
        """
        print('scrapy response', response)
        yield dict(
            # Title of the blog post; None if the page layout changed.
            title=response.xpath('//h1[@class="title-article"]/text()').extract_first(),
            # Raw page bytes, kept for downstream processing.
            data=response.body,
        )