import scrapy
from scrapy.exceptions import CloseSpider
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
import os
from urllib.parse import urlparse


class HouseSpider(scrapy.Spider):
    """Crawl second-hand housing listings from wuhan.anjuke.com.

    Drives a real Chrome browser via Selenium so that a human operator can
    solve the site's anti-bot verification (CAPTCHA / slider) when it is
    detected; the blocked request is then retried with the verified cookies.
    """

    name = "house"
    allowed_domains = ["wuhan.anjuke.com"]
    start_urls = ["https://wuhan.anjuke.com/sale/p1"]

    # Maximum number of listing pages to crawl; set to None for no limit.
    max_pages = 2

    # Verification state: flipped to True once a human has passed the
    # anti-bot check, so the crawl is only interrupted once.
    VERIFIED = False
    # Last URL seen by parse(); kept for retry/debugging purposes.
    CURRENT_URL = None

    custom_settings = {
        'DOWNLOAD_DELAY': 3,
        'AUTOTHROTTLE_ENABLED': True,
        'ROBOTSTXT_OBEY': False,
        'COOKIES_ENABLED': True,
        'DEFAULT_REQUEST_HEADERS': {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9',
        },
    }

    def __init__(self, *args, **kwargs):
        # Accept and forward Scrapy's spider arguments (e.g.
        # `scrapy crawl house -a key=value`); the original bare
        # `__init__(self)` signature silently broke them.
        super().__init__(*args, **kwargs)

        # Hide the most obvious "controlled by automation" fingerprints.
        chrome_options = Options()
        chrome_options.add_argument("--disable-blink-features=AutomationControlled")
        chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
        chrome_options.add_experimental_option('useAutomationExtension', False)

        # Headless mode (optional; manual verification needs a visible window)
        # chrome_options.add_argument("--headless")

        self.driver_path = self.get_chromedriver_path()
        # Selenium 4 removed the `executable_path` keyword argument;
        # the driver path must now be wrapped in a Service object.
        from selenium.webdriver.chrome.service import Service
        self.browser = webdriver.Chrome(
            service=Service(self.driver_path),
            options=chrome_options,
        )
        self.browser.maximize_window()

    def get_chromedriver_path(self):
        """Return the first existing chromedriver path for this platform.

        Raises:
            FileNotFoundError: if none of the candidate paths exists.
        """
        if os.name == 'nt':  # Windows
            paths = [
                'chromedriver.exe',
                'C:/chromedriver.exe',
            ]
        else:  # Linux / macOS
            paths = [
                'chromedriver',
                '/usr/local/bin/chromedriver',
                '/usr/bin/chromedriver',
            ]

        for path in paths:
            if os.path.exists(path):
                return path

        # FileNotFoundError is a subclass of Exception, so any existing
        # `except Exception` caller keeps working.
        raise FileNotFoundError("未找到chromedriver，请下载并配置路径: https://chromedriver.chromium.org/")

    def detect_verification(self, response):
        """Return True if *response* looks like an anti-bot verification page."""
        verification_keywords = [
            '验证码', 'CAPTCHA', '安全验证',
            '人机验证', '滑块验证', 'verify',
            '请输入验证码', 'Verify you are human'
        ]

        # Check the response body for verification-related keywords.
        if any(keyword in response.text for keyword in verification_keywords):
            return True

        # Check for common CAPTCHA container elements.
        captcha_selectors = [
            '//div[contains(@class, "captcha")]',
            '//iframe[contains(@src, "captcha")]',
            '//div[contains(@id, "captcha")]',
            '//div[contains(@class, "geetest")]'
        ]

        for selector in captcha_selectors:
            if response.xpath(selector):
                return True

        return False

    def manual_verification(self, url):
        """Open *url* in the Selenium browser and wait for a human to verify.

        Blocks on stdin until the operator confirms, then returns the
        browser's cookies as a plain ``{name: value}`` dict.

        Raises:
            CloseSpider: if anything goes wrong during verification.
        """
        print("\n⚠️ 检测到反爬验证，需要人工处理 ⚠️")
        print(f"正在打开浏览器: {url}")

        try:
            # Load the verification page in the visible browser.
            self.browser.get(url)
            print("\n请在浏览器中完成验证:")
            print("1. 完成所有验证步骤")
            print("2. 确保页面正常显示后")
            print("3. 返回此窗口按回车键继续\n")

            # Block until the operator finishes the verification.
            input("完成验证后按回车键继续爬取...")

            # Collect the now-verified session cookies.
            cookies = {}
            for cookie in self.browser.get_cookies():
                cookies[cookie['name']] = cookie['value']

            return cookies

        except Exception as e:
            self.logger.error(f"验证过程中出错: {e}")
            raise CloseSpider("验证失败")

    def parse(self, response):
        """Parse a listing page; yield item dicts and follow pagination."""
        # Remember the current URL for potential retry/debugging.
        self.CURRENT_URL = response.url

        # If a verification page was served, hand off to a human once,
        # then re-issue the same request with the verified cookies.
        if self.detect_verification(response) and not self.VERIFIED:
            cookies = self.manual_verification(response.url)
            self.VERIFIED = True

            yield scrapy.Request(
                response.url,
                callback=self.parse,
                cookies=cookies,
                dont_filter=True,
                meta={'verified': True}
            )
            return

        # Normal extraction path.
        node_list = response.xpath('//div[@class="property"]')
        for node in node_list:
            item = {}

            # Core listing fields.
            item['img_url'] = node.xpath('.//img[@class="lazy-img cover"]/@src').get()
            item['detail_url'] = node.xpath('.//a[@class="property-ex"]/@href').get()
            item['name'] = node.xpath('.//h3[@class="property-content-title-name"]/text()').get()

            # Price: tolerate non-numeric text. The original guard
            # (`replace('.', '').isdigit()`) still let strings like
            # "1.2.3" reach float() and raise ValueError.
            price_text = node.xpath('.//span[@class="property-price-total-num"]/text()').get()
            try:
                item['price'] = float(price_text) if price_text else 0
            except (TypeError, ValueError):
                item['price'] = 0

            # Further field extraction goes here...

            yield item

        # Pagination: follow the "next" link until max_pages is reached.
        if self.max_pages is None or response.meta.get('page', 1) < self.max_pages:
            next_page = response.xpath('//a[contains(@class, "next") and contains(@class, "next-active")]/@href').get()
            if next_page:
                next_url = response.urljoin(next_page)
                yield scrapy.Request(
                    next_url,
                    callback=self.parse,
                    meta={'page': response.meta.get('page', 1) + 1}
                )

    def closed(self, reason):
        """Scrapy shutdown hook: quit the Selenium browser."""
        if hasattr(self, 'browser'):
            self.browser.quit()
        self.logger.info(f"爬虫已关闭，原因: {reason}")


# 运行爬虫的代码（如果需要直接运行）
# Entry point for running the spider directly, outside `scrapy crawl`.
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    # Build a crawler process from the project's settings.py,
    # register this spider, and block until the crawl finishes.
    crawler = CrawlerProcess(get_project_settings())
    crawler.crawl(HouseSpider)
    crawler.start()