
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import random
import time

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy import signals
from scrapy.exceptions import IgnoreRequest
from scrapy.http import HtmlResponse
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait


class QimairankSpiderMiddleware:
    """Default Scrapy spider middleware scaffold (pass-through behavior)."""

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy uses this factory to create the middleware and hook signals.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened
        )
        return middleware

    def process_spider_input(self, response, spider):
        # Returning None lets the response continue through the chain.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every item/request produced by the spider unchanged.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No custom exception handling: fall through to other middlewares.
        pass

    async def process_start(self, start):
        # Pass the spider's start requests/items through untouched.
        async for entry in start:
            yield entry

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class QimairankDownloaderMiddleware:
    """Default Scrapy downloader middleware scaffold (pass-through behavior)."""

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also wires up the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened
        )
        return middleware

    def process_request(self, request, spider):
        # None means: continue processing this request normally.
        return None

    def process_response(self, request, response, spider):
        # Hand the response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Defer to other installed middlewares / default behavior.
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class RandomUserAgent(object):
    """Downloader middleware that assigns a random User-Agent per request."""

    def __init__(self, agents):
        # Pool of user-agent strings to pick from.
        self.agents = agents

    @classmethod
    def from_crawler(cls, crawler):
        # Pull the pool from the project's USER_AGENTS setting.
        agent_pool = crawler.settings.getlist('USER_AGENTS')
        return cls(agent_pool)

    def process_request(self, request, spider):
        # Only set the header when the request does not already carry one.
        chosen = random.choice(self.agents)
        request.headers.setdefault('User-Agent', chosen)


class SeleniumMiddleware(object):
    """Downloader middleware that renders pages through a real Safari browser.

    Logs in to qimai.cn before the first request, re-authenticates when the
    session expires (detected by a redirect back to the sign-in page), and
    short-circuits Scrapy's downloader by returning the rendered page as an
    ``HtmlResponse``.

    Fixes over the previous version:
    - A failed login now raises ``IgnoreRequest`` instead of returning
      ``None``, which previously let the request fall through to the default
      downloader and fetch the page unauthenticated.
    - Selenium page-load timeouts are caught and converted to
      ``IgnoreRequest`` instead of escaping as raw exceptions.
    - ``spider_closed`` is idempotent (no double ``quit()``).
    """

    def __init__(self, timeout=50):
        # timeout: page-load / explicit-wait limit in seconds. Kept as a
        # defaulted argument (same value as before) so existing callers work.
        self.timeout = timeout
        self.login_required = True   # the site content needs an account
        self.logged_in = False       # flipped to True after a successful login

        # Safari ships with macOS and needs no separate driver binary.
        self.browser = webdriver.Safari()
        self.browser.set_window_size(1400, 700)
        self.browser.set_page_load_timeout(self.timeout)
        self.wait = WebDriverWait(self.browser, self.timeout)

    def login(self, spider):
        """Perform the interactive login flow; return True on success.

        Credentials come from the spider attributes ``username``/``password``
        or, failing that, the LOGIN_USERNAME / LOGIN_PASSWORD settings.
        Returns False on any failure (missing credentials, missing form
        elements, or still sitting on the sign-in page afterwards).
        """
        try:
            spider.logger.info("开始登录七麦网站...")
            self.browser.get("https://www.qimai.cn/account/signin")
            time.sleep(3)  # give the login form time to render

            username = getattr(spider, 'username', None) or spider.settings.get('LOGIN_USERNAME')
            password = getattr(spider, 'password', None) or spider.settings.get('LOGIN_PASSWORD')

            if not username or not password:
                spider.logger.error("未提供登录凭据，请在settings.py中设置LOGIN_USERNAME和LOGIN_PASSWORD")
                return False

            try:
                # Fill in the credentials.
                username_input = self.browser.find_element("name", "username")
                username_input.clear()
                username_input.send_keys(username)

                password_input = self.browser.find_element("name", "password")
                password_input.clear()
                password_input.send_keys(password)

                # Submit the form.
                login_button = self.browser.find_element("css selector", "button[type='submit']")
                login_button.click()

                time.sleep(5)  # wait for the post-login redirect

                # Still being on the sign-in URL means the login failed.
                if "account/signin" not in self.browser.current_url:
                    spider.logger.info("登录成功")
                    self.logged_in = True
                    return True
                spider.logger.error("登录失败，请检查用户名和密码")
                return False

            except Exception as e:
                # Broad on purpose: any failure in the form interaction is
                # reported and treated as "not logged in", never raised.
                spider.logger.error(f"登录过程中出错: {str(e)}")
                return False

        except Exception as e:
            spider.logger.error(f"访问登录页面失败: {str(e)}")
            return False

    def process_request(self, request, spider):
        """Fetch ``request.url`` with the browser and return an HtmlResponse.

        Raises:
            IgnoreRequest: when login (or re-login) fails, or the page load
                times out — the request is dropped rather than silently
                handed to the default (unauthenticated) downloader.
        """
        # First request: authenticate before fetching anything.
        if self.login_required and not self.logged_in:
            if not self.login(spider):
                spider.logger.error("登录失败，停止爬取")
                raise IgnoreRequest("login failed")

        # Navigate only when we are not already on the target URL.
        if self.browser.current_url != request.url:
            try:
                self.browser.get(request.url)
            except TimeoutException:
                spider.logger.warning("页面加载超时: %s", request.url)
                raise IgnoreRequest(f"page load timed out: {request.url}")
            time.sleep(5)  # crude wait for dynamic content to render

        # A redirect back to the sign-in page means the session expired.
        if "account/signin" in self.browser.current_url:
            spider.logger.warning("会话过期，重新登录...")
            self.logged_in = False
            if self.login(spider):
                self.browser.get(request.url)
                time.sleep(3)
            else:
                raise IgnoreRequest("re-login failed")

        # Hand the rendered HTML straight back to Scrapy.
        return HtmlResponse(
            url=self.browser.current_url,
            body=self.browser.page_source.encode("utf-8"),
            encoding="utf-8",
            request=request,
        )

    def spider_closed(self, spider):
        """Quit the browser when the spider finishes (safe to call twice)."""
        if getattr(self, "browser", None):
            self.browser.quit()
            self.browser = None  # guard against a second close
            spider.logger.info("Safari browser closed.")

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and wire browser cleanup to spider_closed."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware
