# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
import random
import logging
import time
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
import csv
from selenium import webdriver


class MfkRedisSpiderMiddleware:
    """Spider middleware that attaches CSV writers for hospital and doctor
    records to the spider on open, and closes the files on close.

    The writers are exposed on the spider as ``spider.writer`` (hospitals)
    and ``spider.writer1`` (doctors) so callbacks can emit rows directly.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: build the middleware and wire up signals."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        # BUG FIX: spider_closed was defined but never connected, so the CSV
        # files were never closed and buffered rows could be lost.
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def spider_opened(self, spider):
        """Open both output CSV files in append mode and attach DictWriters.

        Writes the header row only when the file is empty, so re-running the
        crawl does not inject duplicate header lines mid-file.
        """
        # Hospital records.
        self.fieldnames = ['province', 'city', 'name', 'level', 'kind',
                           'mode', 'alias', 'address', 'telephone', 'scale', 'process', 'remarks', 'url']
        self.filename = 'mfk_hos.csv'
        spider.file = open(self.filename, 'a', encoding='utf-8', newline='')
        spider.writer = csv.DictWriter(spider.file, self.fieldnames)
        # In append mode tell() == 0 means the file is new/empty.
        if spider.file.tell() == 0:
            spider.writer.writeheader()

        # Doctor records.
        self.fieldnames1 = ['name', 'jobtitle', 'teachtitle', 'grade', 'type', 'province', 'city', 'hospital', 'department', 'department1', 'goodat',
                            'remarks', 'multi_job', 'url']
        self.filename1 = 'mfk_doc.csv'
        spider.file1 = open(self.filename1, 'a', encoding='utf-8', newline='')
        spider.writer1 = csv.DictWriter(spider.file1, self.fieldnames1)
        if spider.file1.tell() == 0:
            spider.writer1.writeheader()

    def spider_closed(self, spider):
        """Flush and close both CSV files when the spider finishes."""
        spider.file.close()
        spider.file1.close()


class MfkRedisDownloaderMiddleware:
    """Downloader middleware that sets a random User-Agent on each request.

    The pool of user-agent strings comes from the ``USER_AGENTS`` setting.
    """

    def __init__(self, user_agent):
        # List of user-agent strings (or None if the setting is missing).
        self.user_agent = user_agent

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook: read the USER_AGENTS setting."""
        return cls(
            user_agent=crawler.settings.get('USER_AGENTS')
        )

    def process_request(self, request, spider):
        """Assign one randomly chosen user-agent string to the request.

        BUG FIX: the original used random.choices(), which returns a
        one-element *list*, so the User-Agent header was set to a list
        instead of a single string. random.choice() returns one element.
        """
        if self.user_agent:
            agent = random.choice(self.user_agent)
            request.headers['User-Agent'] = agent
        # Returning None lets Scrapy continue processing the request.