# -*- coding: utf-8 -*-
import scrapy

# Spider class -- inherits from the scrapy Spider base class.
class ExampleSpider(scrapy.Spider):
    """Minimal demo spider that fetches the Baidu homepage.

    Prints the request headers of the first response so the default
    Scrapy headers (User-Agent etc.) can be inspected on the console.

    NOTE(review): the spider's registered name is 'baidu' even though the
    class is called ExampleSpider — run it with `scrapy crawl baidu`.
    """

    name = 'baidu'  # spider name used by `scrapy crawl`
    allowed_domains = ['baidu.com']  # domains the spider is allowed to crawl
    start_urls = ['http://www.baidu.com/']  # initial URL(s) to fetch

    def parse(self, response):
        """Default callback: dump the outgoing request headers.

        :param response: the scrapy Response for a start URL.
        :returns: None (nothing is yielded; this spider scrapes no items).
        """
        print('*'*50)
        # response.request.headers shows the headers Scrapy actually sent.
        print(response.request.headers)
        print('*'*50)
# NOTE: the following usage notes were a stray module-level string literal
# (a no-op statement, not a docstring); converted to real comments.
#
# Run from a terminal:
#     scrapy crawl <spider-name> -o <output-file-name>
# Generate an SSH key pair:
#     ssh-keygen -t rsa -C " "