# -*- coding: utf-8 -*-
import scrapy
import urllib.request as ur
import urllib.parse as up
import re


class SlSpider(scrapy.Spider):
    """Search so.csdn.net blog results for a keyword and yield, for each
    result article, its title (sanitized for filename use) and raw HTML body.
    """

    name = 'sl'
    allowed_domains = ['csdn.net']
    # Search keyword; '多线程' means "multithreading".
    keyWord = '多线程'

    def start_requests(self):
        """Yield one request per search-result page (currently page 1 only;
        widen the range to crawl more pages)."""
        for pn in range(1, 2):
            dataurl = {
                'p': pn,            # result page number
                'q': self.keyWord,  # search term
            }
            url = 'https://so.csdn.net/so/search/s.do?{urlquery}&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0'.format(
                urlquery=up.urlencode(dataurl))
            yield scrapy.Request(
                url=url,
                callback=self.parse,
            )

    def parse(self, response):
        """Extract each result article's link from a search-result page and
        follow it to `parse2`."""
        # Anchor on the span.mr16 marker, then climb to the dt holding the link.
        href_s = response.xpath(
            "//div[@class='search-list-con']/dl//span[@class='mr16']/../../dt//a/@href"
        ).extract()
        for href in href_s:
            yield scrapy.Request(
                url=href,
                callback=self.parse2,
            )

    def parse2(self, response):
        """Yield an item dict with the article title and raw page body.

        The title is stripped of characters that are illegal in Windows
        filenames, since downstream code presumably uses it as one.
        """
        title = response.xpath("//h1[@class='title-article']/text()").extract_first()
        # extract_first() returns None when no matching <h1> exists; guard
        # so re.sub does not raise TypeError on such pages.
        if title is None:
            title = ''
        else:
            title = re.sub(r'[/\\:*"<>|?]', '', title)
        yield dict(
            title=title,
            data=response.body,
        )