import json
import os
import random
import re
import time
import threading  # 导入线程模块
from urllib.parse import urljoin, urlparse, urlunparse
from bs4 import BeautifulSoup
from playwright.sync_api import sync_playwright
from trafilatura import extract
import requests
from playwright_stealth import stealth_sync

# JS_PATH normally comes from application.settings; if that package is not
# importable, fall back to this file's directory so the module still imports.
# NOTE(review): whichever path wins must contain "roll.js", or browser-mode
# JS injection is silently disabled (see CrawlerBrowser.__init__).
try:
    from application.settings import JS_PATH
except ImportError:
    # Fallback: use the directory containing this file.
    print("警告: 无法导入 application.settings.JS_PATH。使用默认路径。")
    JS_PATH = os.path.dirname(os.path.abspath(__file__))  # directory of this source file


class CrawlerBrowser:
    headless = False
    args = [
        "--disable-gpu",
        "--disable-blink-features=AutomationControlled",
        "--disable-infobars",
        "--window-size=1280,800",
        "--blink-settings=imagesEnabled=false",
        "--disable-remote-fonts",
        "--disable-images",
        "--disable-software-rasterizer",
        "--disable-dev-shm-usage",
        "--disable-background-timer-throttling",
        "--disable-backgrounding-occluded-windows",
        "--disable-breakpad",
        "--disable-client-side-phishing-detection",
        "--disable-component-extensions-with-background-pages",
        "--disable-default-apps",
        "--disable-extensions",
        "--disable-features=TranslateUI",
        "--disable-hang-monitor",
        "--disable-ipc-flooding-protection",
        "--disable-popup-blocking",
        "--disable-prompt-on-repost",
        "--disable-sync",
        "--metrics-recording-only",
        "--no-first-run",
        "--force-color-profile=srgb",
    ]

    # **重构点：使用线程安全的 Semaphore 替换 max_task_size**
    MAX_BROWSER_TASKS = 5  # 最大并发浏览器任务数
    # 类级别信号量，用于控制 Playwright 浏览器的并发数量
    BROWSER_SEMAPHORE = threading.Semaphore(MAX_BROWSER_TASKS)

    # 模拟常见浏览器的 Headers，用于反爬虫
    DEFAULT_HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'DNT': '1',  # Do Not Track
        'Upgrade-Insecure-Requests': '1',
    }

    def __init__(self, use_requests=False):
        self.use_requests = use_requests

        # 确保 JS 文件存在并加载
        js_file_path = os.path.join(JS_PATH, "roll.js")
        if not os.path.exists(js_file_path):
            # 如果文件不存在，设置为空字符串或抛出错误
            print(f"警告: 找不到 JS 文件: {js_file_path}。JS 注入将无效。")
            self.js_code = ""
        else:
            with open(js_file_path, "r", encoding="utf-8") as f:
                self.js_code = f.read()

    def process(self, url, breadth=30):
        # Python 3.10+ 的 match/case 结构
        match self.use_requests:
            case False:
                html = self.request_parsing(url)
            case True:
                html = self.brower_parsing(url)
            case _:
                raise ValueError("请选择正确的搜索器")

        if not html:
            print(f"未能从 {url} 获取到 HTML 内容。")
            return {"text_with_meta": None, "url_list": set()}

        # 解析正文（此操作不依赖 page 对象）
        text_with_meta = extract(
            html,
            with_metadata=True,
            include_links=True,
            include_comments=True,
            output_format="json",
        )
        url_list = self.get_html_urls(url, html, breadth)
        if text_with_meta:  # trafilatura 错误处理
            try:
                text_with_meta = json.loads(text_with_meta)
            except Exception:
                # 如果不是有效的 JSON，保留原始文本
                pass
        return {"text_with_meta": text_with_meta, "url_list": url_list}

    def brower_parsing(self, url):
        html_content = ""
        print(f"正在爬取: {url} 浏览器模式...")

        # **重构点：使用信号量控制并发。with 语句确保任务结束后自动释放信号量。**
        with CrawlerBrowser.BROWSER_SEMAPHORE:
            with sync_playwright() as p:
                browser = None
                try:
                    browser = p.chromium.launch(
                        headless=self.headless, args=self.args)
                    context = browser.new_context()
                    page = context.new_page()
                    # ✨ 应用 stealth 技术
                    stealth_sync(page)

                    try:
                        page.goto(url, timeout=20000,
                                  wait_until="domcontentloaded")
                    except Exception as e:
                        print(f"Playwright goto 失败: {e}")
                        # 即使超时，也尝试获取内容
                        pass

                    if self.js_code:  # 仅当代码加载成功时才注入
                        try:
                            page.evaluate(self.js_code)
                        except Exception as e:
                            print(f"JS 注入失败: {e}")

                    time.sleep(random.uniform(3, 4))  # 等待动态内容加载

                    # 在 Playwright 会话仍然活跃时提取内容
                    html_content = page.content()

                except Exception as e:
                    print(f"Playwright 启动或执行失败: {e}")
                finally:
                    # 确保无论成功还是失败，浏览器都被关闭
                    if browser and browser.is_connected():
                        browser.close()

        return html_content  # 信号量在 with 块结束时自动释放

    def request_parsing(self, url):
        """
        使用 requests 库获取页面 HTML，并携带 Headers 尝试反爬虫。
        此方法不使用 BROWSER_SEMAPHORE，因为 requests 不涉及浏览器资源。
        """
        try:
            print(f"正在爬取: {url} 网络请求模式...")
            # 使用 Session 来保持会话和自动处理 cookies
            with requests.Session() as session:
                session.headers.update(self.DEFAULT_HEADERS)

                response = session.get(
                    url,
                    timeout=(30, 45),  # (连接超时, 读取超时)
                    allow_redirects=True  # 自动处理重定向
                )

                # 检查请求是否成功
                response.raise_for_status()

                # 确保内容是 HTML
                if 'text/html' not in response.headers.get('Content-Type', ''):
                    print(f"警告: {url} 返回的不是 HTML 内容。")
                    return ""  # 返回空字符串

                # 自动处理编码
                response.encoding = response.apparent_encoding or 'utf-8'

                return response.text

        except requests.exceptions.RequestException as e:
            print(f"Requests 请求失败 (URL: {url}): {e}")
            return ""  # 发生异常时返回空字符串
        except Exception as e:
            print(f"request_parsing 发生未知错误: {e}")
            return ""

    def get_html_urls(self, url, content, breadth):
        base_domain = urlparse(url).netloc.lower()  # 当前域名
        soup = BeautifulSoup(content, "html.parser")
        links = []
        for a in soup.find_all("a", href=True):
            abs_url = urljoin(url, a["href"])  # 拼成绝对路径

            # 使用过滤器过滤
            cleaned_url = self.filter_special_url(abs_url)
            if not cleaned_url:
                continue

            # 只保留同域名且非空链接
            if urlparse(cleaned_url).netloc.lower() == base_domain and cleaned_url != url:
                links.append(cleaned_url)

        links = list(set(links))  # 去重
        random.shuffle(links)
        links = links[:breadth]
        return set(links)

    @staticmethod
    def filter_special_url(url: str):
        r"""
        判断并过滤特殊或无效的 URL。

        该函数用于检测并过滤掉不适合进一步访问或抓取的 URL，
        例如脚本链接、登录接口、媒体文件、下载链接等。
        若 URL 合法且值得保留，则返回去除 fragment 的规范化字符串；
        否则返回 ``None``。
        """
        if not url:
            return None

        url = url.strip()
        if not url:
            return None

        lowered = url.lower()

        # === 1. 非网页协议过滤 ===
        forbidden_schemes = (
            "javascript:", "mailto:", "tel:", "data:",
            "ftp:", "file:", "blob:", "ws:", "wss:"
        )
        if lowered.startswith(forbidden_schemes):
            return None

        # === 2. 空或仅锚点 ===
        if lowered.startswith("#"):
            return None

        # === 3. 敏感关键字过滤 ===
        # login/signup/password/callback/token 等
        bad_keyword_pattern = re.compile(
            r'(^|[\W_])('
            r'submit|login|signin|signup|register|logout|auth|oauth|session|callback|redirect|token|'
            r'passwd|password|reset|verify|confirmation|confirm'
            r')([\W_]|$)',
            flags=re.IGNORECASE
        )
        if bad_keyword_pattern.search(url):
            return None

        # === 4. 媒体与静态资源扩展名过滤 ===
        media_extensions = {
            # images
            ".jpg", ".jpeg", ".png", ".gif", ".webp", ".svg", ".ico", ".bmp", ".tif", ".tiff", ".avif",
            # audio
            ".mp3", ".wav", ".ogg", ".m4a", ".aac", ".flac",
            # video
            ".mp4", ".mov", ".webm", ".ogv", ".mkv", ".avi", ".wmv", ".flv",
            # documents
            ".xls", ".xlsx", ".csv", ".xml", ".doc", ".docx", ".pdf", ".ppt", ".pptx",
            # archives
            ".zip", ".rar", ".7z", ".tar", ".gz", ".bz2",
            # frontend assets
            ".css", ".js", ".map", ".json",
            # fonts
            ".woff", ".woff2", ".ttf", ".eot", ".otf", ".sfnt",
            # others (binary / executable)
            ".exe", ".dll", ".bin", ".apk", ".dmg", ".iso",
        }

        parsed = urlparse(url)
        scheme = parsed.scheme.lower()

        # === 5. 非 HTTP(S) 协议过滤（允许相对路径） ===
        if scheme and scheme not in ("http", "https"):
            return None

        path_lower = (parsed.path or "").lower()

        # 检查路径扩展名
        for ext in media_extensions:
            if path_lower.endswith(ext):
                return None

        # === 6. 去除 fragment ===
        cleaned = parsed._replace(fragment="")
        result = urlunparse(cleaned)

        # 若去除 fragment 后为空，则直接忽略
        return result if result.strip() else None


if __name__ == "__main__":
    # --- Example usage (requires playwright, requests, trafilatura, bs4 and
    # playwright-stealth to be installed) ---

    # Example 1: requests mode.
    # Note: the requests path does not use the browser concurrency semaphore.
    requests_browser = CrawlerBrowser(use_requests=True)
    requests_data = requests_browser.process(
        "https://www.grandviewresearch.com/industry-analysis/rare-cell-isolation-market-report")
    print("\n--- Requests Mode Result ---")
    print(
        f"Title: {requests_data['text_with_meta'].get('title') if requests_data['text_with_meta'] else 'N/A'}")
    print(f"Found URLs: {len(requests_data['url_list'])}")
    print("-" * 30)

    # Example 2: Playwright mode (use multiple threads to exercise the
    # concurrency semaphore).
    # playwright_browser = CrawlerBrowser(use_requests=False)
    # playwright_data = playwright_browser.process("https://www.example.com")
    # print("\n--- Playwright Mode Result ---")
    # print(playwright_data.get("text_with_meta"))