# -*- coding: utf-8 -*-
import scrapy
import logging
from scrapy import signals
from selenium import webdriver


logger = logging.getLogger(__name__)


class CSpider(scrapy.Spider):
    """Demo spider that owns a Selenium Chrome driver for its whole lifetime.

    The driver is attached in :meth:`from_crawler` and shut down through the
    ``spider_closed`` signal, so the browser process is always released when
    the crawl ends (success or failure).
    """

    name = 'c'
    allowed_domains = ['baidu.com']
    # NOTE: the original had a stray '' concatenated here; removed because it
    # read like a second (empty) URL while producing the identical string.
    start_urls = ['https://www.baidu.com']

    # ----------------------------- signals -----------------------------
    # Since from_crawler is overridden, spider_closed must be defined on this
    # class and connected explicitly — otherwise signal dispatch fails at
    # shutdown.
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider, attach a Chrome driver, and hook the close signal.

        :param crawler: the Crawler that owns this spider run.
        :returns: the fully initialized spider instance.
        """
        # 1) Let the base class construct the spider as usual.
        spider = super(CSpider, cls).from_crawler(crawler, *args, **kwargs)
        # 2) Attach a Chrome driver (e.g. for a downloader middleware to use).
        spider.chrome = webdriver.Chrome()
        # 3) Guarantee the driver is released when the spider closes.
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Signal handler: runs once at the very end of the Scrapy run."""
        print(f"{spider.name} --> was end")
        print("爬虫结束了")
        # quit() (not close()) so the whole browser process terminates.
        spider.chrome.quit()
    # ----------------------------- signals -----------------------------

    def parse(self, response):
        """Entry callback for ``start_urls``; chains into :meth:`parse2`."""
        print("parse is called")
        url = "https://www.baidu.com/s?tn=baidutop10&wd=%E4%BC%8A%E4%B8%87%E5%8D%A1%E4%B8%BA%E7%89%B9%E6%9C%97%E6%99%AE%E7%8C%AE%E4%B8%8A%E7%94%9F%E6%97%A5%E7%A5%9D%E7%A6%8F&rsv_idx=2&usm=1&ie=utf-8&rsv_cq=6%E5%B2%81%E5%A5%B3%E5%AD%A9%E9%9A%8F%E7%88%B8%E5%A6%88%E8%87%AA%E9%A9%BE%E8%B5%B4%E5%8C%97%E4%BA%AC%E5%90%8E%E7%A1%AE%E8%AF%8A&rsv_dl=0_right_fyb_pchot_20811_01&rsf=9c0f72eda4a1d94d17d69198b34d7307_1_15_2&rqid=89a371b900002e4d"
        yield scrapy.Request(
            url=url,
            callback=self.parse2,
        )

    def parse2(self, response):
        """Callback for the follow-up search-results request."""
        print("parse2 is called")