# -*- coding: utf-8 -*-
import scrapy


class S1Spider(scrapy.Spider):
    """Scrape the navigation-bar link texts from the CSDN blog homepage.

    Yields a single item of the form ``{'data': [<link text>, ...]}``.
    """

    name = 's1'
    allowed_domains = ['blog.csdn.net']
    # NOTE(review): start_urls is never used — start_requests() below
    # overrides the default scheduling. Kept for backward compatibility.
    start_urls = ['http://blog.csdn.net/']

    def start_requests(self):
        """Issue the initial request, routing the response to parse2()."""
        yield scrapy.Request(
            url='http://blog.csdn.net/',
            callback=self.parse2,
        )

    def parse2(self, response):
        """Extract the nav-bar link texts and yield them as one item.

        :param response: HTML response for the CSDN homepage.
        :yields: dict with key ``'data'`` holding the list of link-text
            strings (empty list when the nav bar is not present).
        """
        data = response.xpath('//div[@class="nav_com"]//li/a/text()').extract()
        yield {'data': data}