# -*- coding: utf-8 -*-
import scrapy


class Z1Spider(scrapy.Spider):
    """Search CSDN for a keyword and scrape each blog post in the results.

    Flow: ``start_requests`` issues one search query -> ``parse`` follows
    every result link -> ``parse_blog`` yields the post title and raw body.
    """

    name = 'z1'
    # Only real hosts this spider visits. (The previous entry 'blog' was not
    # a valid domain and would have matched any *.blog host via Scrapy's
    # offsite suffix check.)
    allowed_domains = ['blog.csdn.net', 'so.csdn.net']
    # NOTE: attribute name is a typo for "keyword", kept for backward
    # compatibility with `scrapy crawl z1 -a keword=...` and subclasses.
    keword = 'java'
    # Search-results page number; kept as a string since it is only
    # interpolated into the query URL.
    p = '2'

    def start_requests(self):
        """Issue the initial search request for ``keword`` on page ``p``."""
        yield scrapy.Request(
            url=f'https://so.csdn.net/so/search/s.do?p={self.p}&q={self.keword}&t=&viparticle=&domain=&o=&s=&u=&l=&f=',
            callback=self.parse,
        )

    def parse(self, response):
        """Follow each search-result link to its blog post page."""
        for href in response.xpath("//dl/dt/div/a/@href").extract():
            yield scrapy.Request(url=href, callback=self.parse_blog)

    def parse_blog(self, response):
        """Yield one item per blog post: its title text and the raw page body.

        ``data`` holds ``response.body`` (bytes), so downstream pipelines
        must decode it themselves if they need text.
        """
        yield {
            'title': response.xpath(
                "//div[@class='article-title-box']//h1//text()"
            ).extract_first(),
            'data': response.body,
        }
