# -*- coding: utf-8 -*-
import scrapy


class S1Spider(scrapy.Spider):
    """Spider that fetches the CSDN blog homepage and extracts the
    navigation-bar link texts into a single item."""

    name = 's1'
    allowed_domains = ['blog.csdn.net']

    def start_requests(self):
        """Yield the single entry request for the CSDN blog homepage.

        Responses are routed to :meth:`parse2` via the ``callback``
        argument instead of the default ``parse``.
        """
        yield scrapy.Request(
            url='http://blog.csdn.net/',
            callback=self.parse2,
        )

    def parse2(self, response):
        """Extract the nav-menu link texts and yield them as one item.

        :param response: the downloaded homepage response.
        :yields: a dict with key ``'data'`` mapping to the list of
            text strings from ``//div[@class="nav_com"]//li/a``.
        """
        # Run the XPath once; the original evaluated the identical
        # selector twice (once for a debug print, once for the data).
        data = response.xpath('//div[@class="nav_com"]//li/a/text()').extract()
        item = {}
        item['data'] = data
        yield item
