# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import time

from fake_useragent import UserAgent
from scrapy import signals, Selector
from itemadapter import is_item, ItemAdapter
from scrapy.http import Response
from selenium import webdriver
from scrapy.http import HtmlResponse
from selenium.webdriver.common.by import By
from utils import create_chrome_driver, get_cookies_dict




class SpiderDownloaderMiddleware:
    """Downloader middleware that drives a Selenium Chrome browser to log in
    to Taobao and render pages (scrolling to the bottom to trigger lazy
    loading) before handing the rendered HTML back to Scrapy.
    """

    def __init__(self):
        self.sel = None    # Selector over the most recent response (set in process_response)
        self.login = None  # becomes True once a logged-in username is detected
        self.chrome = create_chrome_driver()
        self.login_url = 'https://login.taobao.com/member/login.jhtml'
        # Scrolls to the page bottom so lazily loaded items are rendered.
        self.scrollDown = "window.scrollTo(0, document.body.scrollHeight)"

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def spider_closed(self, spider):
        # Release the browser process when the spider finishes.
        self.chrome.quit()

    def process_request(self, request, spider):
        """Fetch ``request.url`` with Selenium instead of Scrapy's downloader.

        If not yet logged in, fills in the Taobao login form first.  Always
        returns an :class:`HtmlResponse` built from the rendered page, which
        short-circuits the normal download for this request.

        NOTE: the original annotated ``request`` as ``Response``; it is in
        fact a ``scrapy.Request`` — the wrong annotation has been removed.
        """
        self.chrome.get(request.url)
        self.chrome.implicitly_wait(10)
        if self.login is True:
            # Already logged in: just wait, scroll, and capture the page.
            time.sleep(3)
            self.chrome.execute_script(self.scrollDown)
            time.sleep(10)
            return HtmlResponse(self.chrome.current_url, body=self.chrome.page_source,
                                encoding='utf-8', request=request)
        # SECURITY: credentials are hard-coded in source. Move them to Scrapy
        # settings or environment variables instead of committing them.
        self.chrome.find_element(By.XPATH, '//*[@id="fm-login-id"]').send_keys('15112684099')
        time.sleep(2)
        self.chrome.find_element(By.XPATH, '//*[@id="fm-login-password"]').send_keys('goodix**123')
        time.sleep(2)
        self.chrome.find_element(By.XPATH, '//*[@id="login-form"]/div[6]/button').click()
        time.sleep(3)
        self.chrome.execute_script(self.scrollDown)
        time.sleep(10)
        return HtmlResponse(self.chrome.current_url, body=self.chrome.page_source, encoding='utf-8', request=request)

    def process_response(self, request, response, spider):
        """Record login state from the site nav bar, then pass the response on.

        Bug fix: the original returned ``None`` when no login name was found.
        Scrapy requires ``process_response`` to return a Response or Request
        (or raise), so returning ``None`` raised a middleware error and
        dropped every response received before login was detected.  The
        response is now always returned.
        """
        self.sel = Selector(response)
        login_name = self.sel.xpath('//*[@id="J_SiteNavLogin"]/div[1]/div/a/text()').extract_first()
        print("翻页提示，登录信息", login_name)
        if login_name is not None:
            self.login = True
        return response

    def process_exception(self, request, exception, spider):
        # Deliberately a no-op: returning None lets other middlewares and
        # Scrapy's default exception handling take over.
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class RandomUserAgentMiddleware(object):
    """Downloader middleware that sets a random User-Agent on each request.

    Bug fix: the original ``process_request`` defined a nested ``get_ua()``
    function but never called it, so the User-Agent header was never actually
    set and the middleware was a silent no-op.  The header is now applied
    directly.  (Also fixed the ``srawler`` parameter-name typo.)
    """

    def __init__(self, crawler):
        super(RandomUserAgentMiddleware, self).__init__()
        # fake_useragent generator; ``.random`` yields a real-world UA string.
        self.ua = UserAgent()

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler)

    def process_request(self, request, spider):
        # setdefault keeps an explicitly supplied User-Agent untouched.
        request.headers.setdefault('User-Agent', self.ua.random)