import scrapy
from selenium import webdriver
from  selenium.webdriver.chrome.options import Options    # 使用无头浏览器
# Configure Chrome to run headless (no visible window); --disable-gpu is
# the conventional companion flag for headless mode on some platforms.
chorme_options = Options()
for flag in ("--headless", "--disable-gpu"):
    chorme_options.add_argument(flag)
class ZhihuSpider(scrapy.Spider):
    """Spider that fetches a Zhihu answer page, keeping one shared headless
    Chrome browser for the lifetime of the crawl.

    NOTE(review): nothing in this file routes requests through the browser —
    presumably a downloader middleware elsewhere uses ``spider.browser``;
    confirm, otherwise the Selenium instance is started for nothing.
    """

    name = 'zhihu'
    allowed_domains = ['zhihu.com']
    start_urls = ['http://zhihu.com/']

    def __init__(self, *args, **kwargs):
        # Forward Scrapy's spider arguments (e.g. ``scrapy crawl zhihu -a k=v``)
        # instead of dropping them; initialize the base class before
        # attaching resources.
        super().__init__(*args, **kwargs)
        # ``options=`` replaces ``chrome_options=``, which was deprecated in
        # Selenium 3.8 and removed in Selenium 4.
        self.browser = webdriver.Chrome(options=chorme_options)

    def close(self, spider):
        # Called once when the whole crawl finishes: release the browser
        # so no chromedriver process is leaked.
        self.browser.quit()

    def start_requests(self):
        # Seed the crawl with a single answer page (``start_urls`` is unused
        # because this override takes precedence).
        url = "https://www.zhihu.com/question/393696749/answer/1577412779"
        yield scrapy.Request(url, callback=self.parse_index)

    def parse_index(self, response):
        # Dump the response body for inspection.
        print(response.text)

