# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

import json
import time

from scrapy import signals
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

import employment.utils as utils


# Shared ChromeDriver options used by the Selenium-backed middleware below.
chrome_options = Options()
# chrome_options.add_argument("--headless")  # run Chrome without a visible window
# chrome_options.add_argument("--disable-gpu")

# Present a regular desktop-Chrome user agent instead of the default
# "HeadlessChrome" one, so the target site sees an ordinary browser.
_USER_AGENT_ARG = (
    'user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
    'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"'
)
chrome_options.add_argument(_USER_AGENT_ARG)


class EmploymentSpiderMiddleware(object):
    """Default pass-through spider middleware.

    Every hook is implemented as a no-op: responses, results and start
    requests flow through unchanged. Scrapy behaves the same as if the
    middleware were absent, but the hooks are in place for future use.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        """Called for each response entering the spider; None lets it pass."""
        return None

    def process_spider_output(self, response, result, spider):
        """Forward every Request/item produced by the spider unchanged."""
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Called on spider/process_spider_input errors; None = not handled."""
        pass

    def process_start_requests(self, start_requests, spider):
        """Forward the spider's start requests unchanged (requests only)."""
        yield from start_requests

    def spider_opened(self, spider):
        # Log which spider this middleware instance is serving.
        spider.logger.info('Spider opened: %s' % spider.name)


class EmploymentDownloaderMiddleware(object):
    """Downloader middleware that renders every page in a real Chrome browser.

    Two browser windows are kept open for the whole crawl:

    - the "home" window holds the listing (directory) page, so in-page
      pagination clicks keep working between requests;
    - the "sub" window is reused for every detail page.

    ``process_request`` always returns an ``HtmlResponse`` built from the
    rendered ``page_source``, so Scrapy's own HTTP downloader is bypassed.
    """

    # Names of spiders whose target site requires a login session.
    spider_need_login = [
        'zhaopin'
    ]
    isLogin = False  # set to True once a login (manual or via cookies) happened

    homePageHandle = None  # window handle of the listing/directory page
    subPageHandle = None  # window handle of the detail (sub) page

    def __init__(self):
        self.browser = webdriver.Chrome(options=chrome_options)
        # self.browser = webdriver.Chrome()  # visible Chrome, handy for debugging
        # Hide the `navigator.webdriver` flag before any page script runs,
        # so the site cannot trivially detect Selenium.
        self.browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
            Object.defineProperty(navigator, 'webdriver', {
            get: () => undefined
            })
        """
        })
        self.loadTime = 3  # seconds granted to the browser to render a page

        self.homePageHandle = self.browser.current_window_handle
        # Open a second window dedicated to detail pages.
        js = 'window.open("https://www.baidu.com/")'
        self.browser.execute_script(js)

        time.sleep(self.loadTime)  # wait until the new window exists

        all_handles = self.browser.window_handles
        self.subPageHandle = all_handles[-1]

    def login(self, websiteName: str):
        '''
        Log in by replaying cookies previously saved to ``<websiteName>.json``.
        @param websiteName: site name; currently supported: 'zhaopin' (Zhilian Zhaopin)
        '''
        with open(websiteName + '.json', 'r', encoding='utf8') as f:
            listCookies = json.load(f)  # json.load reads the file directly
        print('===============================================cookies===============================')
        print(listCookies)
        print('===============================================cookies===============================')
        for cookie in listCookies:
            self.browser.add_cookie(cookie)
        self.isLogin = True

    def __del__(self):
        # End of the crawl: use quit(), not close() — close() only shuts the
        # current window and would leave the second window plus the
        # chromedriver process running. Guard against a partially-failed
        # __init__ where self.browser was never assigned.
        browser = getattr(self, 'browser', None)
        if browser is not None:
            browser.quit()

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and subscribe to the spider_opened signal."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        """Render ``request.url`` in Chrome and return the rendered HTML.

        Routing:
        - ``meta['item']`` present  -> detail page, rendered in the sub window;
        - ``meta['next_page_css']`` -> pagination, click "next" in the already
          loaded listing instead of re-fetching (a GET would reset to page 1);
        - otherwise                 -> plain listing page load (with an
          interactive manual login + cookie dump on first visit if needed).

        Always returns an HtmlResponse, so the request never reaches
        Scrapy's own downloader.
        """
        if request.meta.get('item') is not None:
            # Detail page: use the dedicated sub window so the listing page
            # (and its pagination state) stays untouched.
            print('===============================子页面======================')

            self.browser.switch_to.window(self.subPageHandle)

            self.browser.get(request.url)
            time.sleep(self.loadTime)  # let the page render
            page_text = self.browser.page_source

        else:
            # Listing / directory page.
            self.browser.switch_to.window(self.homePageHandle)

            time.sleep(self.loadTime)  # give the browser time to settle

            if request.meta.get('next_page_css') is not None:
                # Pagination: click the "next" button inside the live page.
                # (find_element(By.CSS_SELECTOR, ...) works on Selenium 3 and 4;
                # the old find_element_by_css_selector was removed in Selenium 4.)
                nextPageBtn = self.browser.find_element(
                    By.CSS_SELECTOR, request.meta['next_page_css'])

                # Click through JavaScript: succeeds even when the element is
                # covered by an overlay that would break a native .click().
                self.browser.execute_script(
                    "arguments[0].click();", nextPageBtn)
                # nextPageBtn.click()
                time.sleep(self.loadTime)  # let the next page render
                page_text = self.browser.page_source
            else:
                self.browser.get(request.url)
                if spider.name in self.spider_need_login and not self.isLogin:
                    # Site needs a session and none exists yet: wait for a
                    # manual login in the browser window.
                    # self.login(spider.name)
                    # self.browser.refresh()
                    input('请手动登录，登录好了之后输入1：')
                    self.isLogin = True
                    # Persist the cookies so later runs can reuse them.
                    dictCookies = self.browser.get_cookies()
                    jsonCookies = json.dumps(
                        dictCookies, sort_keys=True, indent=2)
                    print(jsonCookies)
                    # Explicit utf8 matches the encoding used when reading
                    # the file back in login().
                    with open(spider.name + '.json', 'w', encoding='utf8') as f:
                        f.write(jsonCookies)
                        print('cookies保存成功')

                time.sleep(self.loadTime)  # let the page render
                # self.browser.implicitly_wait(self.loadTime)
                page_text = self.browser.page_source
        # Hand the rendered markup back to Scrapy as the response object.
        return HtmlResponse(url=self.browser.current_url, body=page_text,
                            encoding='utf-8', request=request)

    def process_response(self, request, response, spider):
        """Pass the downloaded response through unchanged."""
        return response

    def process_exception(self, request, exception, spider):
        """Called on download errors; returning None continues the chain."""
        pass

    def spider_opened(self, spider):
        # Log which spider this middleware instance is serving.
        spider.logger.info('Spider opened: %s' % spider.name)
