import os
import base64 
import aiohttp
import aiofiles
import urllib.parse
from typing import Optional, Dict, Any
from playwright.async_api import Page 
from unpywall import Unpywall
from unpywall.utils import UnpywallCredentials
from bs4 import BeautifulSoup

from app.services.doi_parser import DOIParser
from app.core.config import settings  # 导入settings
import async_timeout
import re
import asyncio
from urllib.parse import urljoin
from app.services.db_service_factory import get_db_service
from app.models.database import NMPDDB, NMPDErrorDB
from playwright.async_api import BrowserContext 

# Module-wide database-service singleton obtained from the factory.
db_service = get_db_service()

# Browser-like default headers sent with every outbound HTTP request so that
# publisher sites and Sci-Hub mirrors treat requests as coming from a regular
# Edge/Chrome browser rather than from a script.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Accept-Encoding": "gzip, deflate, br",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Cache-Control": "max-age=0",
    "sec-ch-ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
}


class DOIDownloader:
    """Resolve a DOI to a downloaded PDF file.

    Download strategy, attempted in order:
      1. Sci-Hub mirrors (``settings.SCIHUB_DOMAINS``);
      2. the open-access link reported by Unpaywall;
      3. a publisher-specific URL pattern derived from Crossref metadata.

    All methods are static; callers supply a shared ``aiohttp.ClientSession``.
    Progress/status is reported through ``DOIParser.update_doi_metater``.
    """

    BASE_URL = "https://doi.org/"
    CROSSREF_API = "https://api.crossref.org/works/"

    # Characters that are illegal in file names on common filesystems.
    _INVALID_FILENAME_CHARS = re.compile(r'[\\/:*?"<>|]')

    @staticmethod
    async def download_paper(
        session: aiohttp.ClientSession,
        task_id: str,
        doi: str,
        save_dir: str,
        check_revoked=None,
    ) -> tuple[bool, Optional[str], Optional[str], Dict[str, Any]]:
        """Download the paper identified by *doi* into *save_dir*.

        Args:
            session: shared HTTP session used for every request.
            task_id: id used for progress reporting and revocation polling.
            doi: DOI to resolve.
            save_dir: directory the PDF is written into.
            check_revoked: optional async predicate; when it returns True
                the download is aborted at the next checkpoint.

        Returns:
            ``(success, file_path, error_message, metadata)``.
        """
        try:
            if check_revoked and await check_revoked(task_id):
                print(f"任务已被撤销，停止下载 DOI: {doi}")
                return False, None, "任务已被撤销", {}
            metadata = await DOIDownloader.get_paper_metadata(session, doi)
            metadata["year"] = str(metadata["year"])
            metadata["status"] = "downloading"
            await DOIParser.update_doi_metater(task_id, doi, metadata)

            title = metadata["title"]

            curr_pdf_url = ""
            # Register the contact e-mail required by the Unpaywall API.
            # TODO(review): this address should come from settings.
            UnpywallCredentials("lvyi21202301@gmail.com")

            # Re-check revocation before each major step.
            if check_revoked and await check_revoked(task_id):
                print(f"任务已被撤销，停止下载 DOI: {doi}")
                return False, None, "任务已被撤销", {}

            # Step 1: try the Sci-Hub mirrors.
            print(f"【1】尝试从Sci-Hub下载:{doi}")
            success, file_path, error, url, status = (
                await DOIDownloader._try_scihub_method(
                    session, task_id, doi, save_dir, title, check_revoked
                )
            )
            if success:
                await DOIParser.update_doi_metater(
                    task_id, doi, {"status": "success", "pdf_url": url}
                )
                return success, file_path, error, metadata
            if status == 403:
                # Access denied but the URL itself is plausible; keep it so
                # it can be stored with the eventual error record.
                curr_pdf_url = url

            # Step 2: ask Unpaywall for an open-access PDF link.
            pdf_url = await DOIDownloader._get_unpaywall_pdf(doi)
            print(f"【2】开始下载:{doi}")
            if pdf_url:
                success, file_path, error = await DOIDownloader._download_from_url(
                    session, pdf_url, doi, save_dir, title
                )
                if success:
                    curr_pdf_url = pdf_url
                    await DOIParser.update_doi_metater(
                        task_id, doi, {"status": "success", "pdf_url": pdf_url}
                    )
                    return success, file_path, error, metadata

            # Step 3: fall back to publisher-specific URL patterns built
            # from Crossref metadata.
            publisher_info = await DOIDownloader._get_crossref_info(session, doi)

            print(f"【3】开始下载:{doi}")
            if publisher_info:
                pdf_url = await DOIDownloader._get_publisher_specific_pdf(
                    doi, publisher_info
                )
                if pdf_url:
                    success, file_path, error = await DOIDownloader._download_from_url(
                        session, pdf_url, doi, save_dir, title
                    )
                    if success:
                        curr_pdf_url = pdf_url
                        await DOIParser.update_doi_metater(
                            task_id, doi, {"status": "success", "pdf_url": pdf_url}
                        )
                        return success, file_path, error, metadata

            # Every strategy failed: record the error and give up.
            await DOIParser.update_doi_metater(
                task_id, doi, {"status": "error", "pdf_url": curr_pdf_url}
            )
            return False, None, "尝试全部方法无法下载", {}

        except Exception as e:
            await DOIParser.update_doi_metater(
                task_id, doi, {"status": "error", "pdf_url": "", "error": str(e)}
            )
            return False, None, f"下载出错: {str(e)}", {}

    @staticmethod
    async def _get_unpaywall_pdf(doi: str) -> Optional[str]:
        """Return the open-access PDF link reported by Unpaywall, or None.

        Unpywall's client is synchronous, so it is run in a worker thread to
        avoid blocking the event loop.
        """
        try:
            return await asyncio.to_thread(Unpywall.get_pdf_link, doi=doi)
        except Exception:
            return None

    @staticmethod
    async def _get_crossref_info(
        session: aiohttp.ClientSession, doi: str
    ) -> Optional[Dict[str, Any]]:
        """Fetch the Crossref work record for *doi*; None on any failure."""
        try:
            url = f"{DOIDownloader.CROSSREF_API}{doi}"
            # Very large cap; the timeout context mainly makes the request
            # cancellable from outside.
            async with async_timeout.timeout(24000):
                async with session.get(url, headers=headers) as response:
                    if response.status == 200:
                        data = await response.json()
                        return data.get("message", {})
            return None
        except Exception:
            return None

    @staticmethod
    async def _get_arxiv_abstract(
        session: aiohttp.ClientSession, doi: str
    ) -> Optional[str]:
        """Fetch the paper abstract from arXiv, or None.

        arXiv API docs: https://arxiv.org/help/api/
        """
        # Turn the DOI into an arXiv search query.
        encoded_doi = urllib.parse.quote(f"doi:{doi}")
        url = f"http://export.arxiv.org/api/query?search_query={encoded_doi}&max_results=1"

        try:
            async with async_timeout.timeout(24000):  # cancellation support
                async with session.get(url, ssl=False, timeout=30) as response:
                    if response.status == 200:
                        # arXiv responds with Atom XML.
                        xml_data = await response.text()

                        # Crude <summary>...</summary> extraction; a real XML
                        # parser (xml.etree.ElementTree) would be more robust,
                        # but the feed is simple enough for this to work.
                        start_tag, end_tag = "<summary>", "</summary>"
                        abstract_start = xml_data.find(start_tag)
                        abstract_end = xml_data.find(end_tag)

                        # Fix: use != -1 so a match at offset 0 is not missed.
                        if abstract_start != -1 and abstract_end != -1:
                            abstract_text = xml_data[
                                abstract_start + len(start_tag) : abstract_end
                            ].strip()
                            # Strip any embedded HTML tags.
                            abstract_text = re.sub(r"<[^>]+>", "", abstract_text)
                            return abstract_text

                    print(f"从arXiv获取摘要失败: {response.status}")
                    return None
        except Exception as e:
            print(f"从arXiv获取摘要时出错: {e}")
            return None

    @staticmethod
    async def _get_pubmed_abstract(
        session: aiohttp.ClientSession, doi: str
    ) -> Optional[str]:
        """Fetch the paper abstract from PubMed, or None.

        Uses NCBI E-utilities: https://www.ncbi.nlm.nih.gov/books/NBK25500/
        """
        # Step 1: look up the PubMed ID (PMID) for the DOI.
        esearch_url = f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term={doi}[DOI]&retmode=json"

        try:
            async with async_timeout.timeout(24000):  # cancellation support
                async with session.get(esearch_url, ssl=False, timeout=30) as response:
                    if response.status == 200:
                        search_data = await response.json()
                        id_list = search_data.get("esearchresult", {}).get("idlist", [])

                        if not id_list:
                            print(f"在PubMed中未找到DOI: {doi}")
                            return None

                        pmid = id_list[0]  # first match wins

                        # Step 2: fetch the record for that PMID.
                        efetch_url = f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id={pmid}&retmode=xml"
                        async with async_timeout.timeout(24000):
                            async with session.get(
                                efetch_url, ssl=False, timeout=30
                            ) as fetch_response:
                                if fetch_response.status == 200:
                                    xml_data = await fetch_response.text()

                                    # Crude <AbstractText> extraction; see
                                    # note in _get_arxiv_abstract.
                                    start_tag = "<AbstractText>"
                                    end_tag = "</AbstractText>"
                                    abstract_start = xml_data.find(start_tag)
                                    abstract_end = xml_data.find(end_tag)

                                    # Fix: != -1 so offset 0 is not missed.
                                    if abstract_start != -1 and abstract_end != -1:
                                        abstract_text = xml_data[
                                            abstract_start + len(start_tag) : abstract_end
                                        ].strip()
                                        # Strip any embedded HTML tags.
                                        abstract_text = re.sub(
                                            r"<[^>]+>", "", abstract_text
                                        )
                                        return abstract_text

                    print(f"从PubMed获取摘要失败: {response.status}")
                    return None
        except Exception as e:
            print(f"从PubMed获取摘要时出错: {e}")
            return None

    @staticmethod
    async def _get_semantic_scholar_abstract(
        session: aiohttp.ClientSession, doi: str
    ) -> Optional[str]:
        """Fetch the paper abstract from Semantic Scholar, or None.

        API docs: https://api.semanticscholar.org/
        """
        url = f"https://api.semanticscholar.org/v1/paper/{doi}"

        try:
            async with async_timeout.timeout(24000):  # cancellation support
                async with session.get(url, ssl=False, timeout=30) as response:
                    if response.status == 200:
                        data = await response.json()
                        abstract = data.get("abstract")
                        if abstract:
                            return abstract

                    print(f"从Semantic Scholar获取摘要失败: {response.status}")
                    return None
        except Exception as e:
            print(f"从Semantic Scholar获取摘要时出错: {e}")
            return None

    @staticmethod
    async def get_paper_metadata(
        session: aiohttp.ClientSession, doi: str
    ) -> Dict[str, Any]:
        """Collect paper metadata (title, abstract, authors, journal, year).

        Primary source is Semantic Scholar; Crossref fills in gaps; arXiv and
        PubMed are last-resort sources for a missing abstract.  Always
        returns a dict with all keys present (possibly empty values).
        """
        metadata = {
            "title": "",
            "abstract": "",
            "authors": [],
            "journal": "",
            "year": "",
            "doi": doi,
        }
        try:
            # First try Semantic Scholar.
            semantic_data = None
            try:
                url = f"https://api.semanticscholar.org/v1/paper/{doi}"
                async with async_timeout.timeout(24000):  # cancellation support
                    async with session.get(url, ssl=False, timeout=30) as response:

                        if response.status == 200:
                            semantic_data = await response.json()
                            print("成功从Semantic Scholar获取数据")
            except Exception as e:
                print(f"从Semantic Scholar获取数据失败: {e}")

            # Extract whatever fields Semantic Scholar provided.
            if semantic_data:
                if "title" in semantic_data and semantic_data["title"]:
                    metadata["title"] = semantic_data["title"]

                if "abstract" in semantic_data and semantic_data["abstract"]:
                    metadata["abstract"] = semantic_data["abstract"]

                if "authors" in semantic_data and isinstance(
                    semantic_data["authors"], list
                ):
                    for author in semantic_data["authors"]:
                        if "name" in author:
                            metadata["authors"].append(author["name"])

                if "venue" in semantic_data and semantic_data["venue"]:
                    metadata["journal"] = semantic_data["venue"]

                if "year" in semantic_data and semantic_data["year"]:
                    metadata["year"] = semantic_data["year"]

            # Fill any missing fields from Crossref.
            if (
                not metadata["title"]
                or not metadata["abstract"]
                or not metadata["authors"]
                or not metadata["journal"]
                or not metadata["year"]
            ):
                print(f"Semantic Scholar数据不完整，尝试从Crossref获取补充信息")
                crossref_data = await DOIDownloader._get_crossref_info(session, doi)

                if crossref_data:
                    # Title (Crossref returns it as a one-element list).
                    if (
                        not metadata["title"]
                        and "title" in crossref_data
                        and crossref_data["title"]
                    ):
                        metadata["title"] = (
                            crossref_data["title"][0]
                            if isinstance(crossref_data["title"], list)
                            else crossref_data["title"]
                        )

                    # Abstract — Crossref abstracts often contain HTML tags
                    # and entities that must be cleaned up.
                    if (
                        not metadata["abstract"]
                        and "abstract" in crossref_data
                        and crossref_data["abstract"]
                    ):
                        abstract_text = crossref_data["abstract"]
                        if abstract_text:
                            # Strip HTML tags.
                            abstract_text = re.sub(r"<[^>]+>", "", abstract_text)
                            # Decode the entities Crossref commonly emits.
                            abstract_text = (
                                abstract_text.replace("&lt;", "<")
                                .replace("&gt;", ">")
                                .replace("&amp;", "&")
                            )
                            abstract_text = abstract_text.replace(
                                "&quot;", '"'
                            ).replace("&apos;", "'")
                            abstract_text = (
                                abstract_text.replace("&nbsp;", " ")
                                .replace("&ndash;", "–")
                                .replace("&mdash;", "—")
                            )
                            # Fix: the previous replacement string was
                            # corrupted (a stray triple-quote made it insert
                            # the literal text ").replace('&rsquo;', " and
                            # never decode &rsquo; at all).  Decode both
                            # single-quote entities to an apostrophe, in the
                            # same style as the &ldquo;/&rdquo; pair below.
                            abstract_text = abstract_text.replace(
                                "&lsquo;", "'"
                            ).replace("&rsquo;", "'")
                            abstract_text = abstract_text.replace(
                                "&ldquo;", '"'
                            ).replace("&rdquo;", '"')
                            # Collapse runs of whitespace.
                            abstract_text = re.sub(r"\s+", " ", abstract_text).strip()
                            metadata["abstract"] = abstract_text

                    # Authors ("given" + "family" name parts).
                    if (
                        not metadata["authors"]
                        and "author" in crossref_data
                        and isinstance(crossref_data["author"], list)
                    ):
                        for author in crossref_data["author"]:
                            author_name = []
                            if "given" in author:
                                author_name.append(author["given"])
                            if "family" in author:
                                author_name.append(author["family"])
                            if author_name:
                                metadata["authors"].append(" ".join(author_name))

                    # Journal / container title.
                    if (
                        not metadata["journal"]
                        and "container-title" in crossref_data
                        and crossref_data["container-title"]
                    ):
                        metadata["journal"] = (
                            crossref_data["container-title"][0]
                            if isinstance(crossref_data["container-title"], list)
                            else crossref_data["container-title"]
                        )

                    # Publication year: prefer "published", fall back to
                    # "created".
                    if not metadata["year"]:
                        if (
                            "published" in crossref_data
                            and "date-parts" in crossref_data["published"]
                        ):
                            date_parts = crossref_data["published"]["date-parts"]
                            if date_parts and date_parts[0] and len(date_parts[0]) > 0:
                                metadata["year"] = date_parts[0][0]
                        elif (
                            "created" in crossref_data
                            and "date-parts" in crossref_data["created"]
                        ):
                            date_parts = crossref_data["created"]["date-parts"]
                            if date_parts and date_parts[0] and len(date_parts[0]) > 0:
                                metadata["year"] = date_parts[0][0]

            # Abstract still missing: try the secondary sources.
            if not metadata["abstract"]:
                print(f"摘要仍然缺失，尝试从其他来源获取...")
                abstract_found = False

                # 1. arXiv
                if not abstract_found:
                    try:
                        arxiv_abstract = await DOIDownloader._get_arxiv_abstract(
                            session, doi
                        )
                        if arxiv_abstract:
                            print(f"从arXiv获取到摘要")
                            metadata["abstract"] = arxiv_abstract
                            abstract_found = True
                    except Exception as e:
                        print(f"从arXiv获取摘要失败: {e}")

                # 2. PubMed
                if not abstract_found:
                    try:
                        pubmed_abstract = await DOIDownloader._get_pubmed_abstract(
                            session, doi
                        )

                        if pubmed_abstract:
                            print(f"从PubMed获取到摘要")
                            metadata["abstract"] = pubmed_abstract
                            abstract_found = True
                    except Exception as e:
                        print(f"从PubMed获取摘要失败: {e}")

                # All sources failed; the abstract stays empty.
                if not abstract_found:
                    print(f"无法从任何来源获取摘要: {doi}")

        except Exception as e:
            print(f"获取元数据出错: {str(e)}")

        return metadata

    @staticmethod
    async def _get_publisher_specific_pdf(
        doi: str, publisher_info: Dict[str, Any]
    ) -> Optional[str]:
        """Build a publisher-specific PDF URL from Crossref metadata.

        Returns the guessed URL, or None when the publisher is unknown.
        These are URL *patterns* — the link may still require a subscription.
        """
        try:
            publisher = publisher_info.get("publisher", "").lower()

            if "elsevier" in publisher or "sciencedirect" in publisher:
                # ScienceDirect
                return f"https://www.sciencedirect.com/science/article/pii/{doi.split('/')[-1]}/pdf"

            elif "springer" in publisher:
                # Springer
                return f"https://link.springer.com/content/pdf/{doi}.pdf"

            elif "wiley" in publisher:
                # Wiley
                return f"https://onlinelibrary.wiley.com/doi/pdf/{doi}"

            elif "ieee" in publisher:
                # IEEE
                return f"https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber={doi.split('/')[-1]}"

            elif "nature" in publisher:
                # Nature
                return f"https://www.nature.com/articles/{doi.split('/')[-1]}.pdf"

            elif "acs" in publisher or "american chemical society" in publisher:
                # ACS
                return f"https://pubs.acs.org/doi/pdf/{doi}"

            elif "rsc" in publisher or "royal society of chemistry" in publisher:
                # RSC
                return f"https://pubs.rsc.org/en/content/articlepdf/{doi}"

            # More publisher patterns can be added here.

            return None
        except Exception:
            return None

    @staticmethod
    async def _download_from_url(
        session: aiohttp.ClientSession, url: str, doi: str, save_dir: str, title: str
    ) -> tuple[bool, Optional[str], Optional[str]]:
        """Download *url* into *save_dir* if it serves a PDF.

        Returns ``(success, file_path, error_message)``.
        """
        try:
            async with async_timeout.timeout(24000):  # cancellation support
                async with session.get(url, headers=headers, ssl=False) as response:
                    # Only accept responses that actually are PDFs.
                    if (
                        response.status == 200
                        and "application/pdf"
                        in response.headers.get("Content-Type", "")
                    ):
                        file_name = await DOIDownloader._init_file_name(
                            title + " | " + doi
                        )
                        file_path = os.path.join(save_dir, file_name)

                        async with aiofiles.open(file_path, "wb") as f:
                            await f.write(await response.read())

                        return True, file_path, None
                    else:
                        return (
                            False,
                            None,
                            f"无法下载PDF: HTTP {response.status}, Content-Type: {response.headers.get('Content-Type')}",
                        )
        except Exception as e:
            return False, None, f"下载URL出错: {str(e)}"

    @staticmethod
    async def _try_scihub_base_method(
        session: aiohttp.ClientSession,
        task_id: str,
        doi: str,
        save_dir: str,
        title: str,
        check_revoked=None,
    ) -> tuple[bool, Optional[str], Optional[str], str, int]:
        """Download a PDF from mirrors that serve the file directly
        (``settings.SCIHUB_PDFURL_BASE``).

        Returns ``(success, file_path, error, last_url, last_http_status)``.
        """
        # Initialised before the try so the outer except can reference them.
        url = ""
        status = 0
        try:
            for domain in settings.SCIHUB_PDFURL_BASE:
                # Abort early if the task has been revoked.
                if check_revoked and await check_revoked(task_id):
                    print(f"任务已被撤销，停止从 Sci-Hub 下载 DOI: {doi}")
                    # Fix: this used to return a 4-tuple, breaking callers
                    # that unpack five values.
                    return False, None, "任务已被撤销", url, status

                try:
                    domain = domain.removesuffix("/")
                    print(f"尝试从 {domain}/{doi}")
                    url = f"{domain}/{doi}"

                    # Fetch the PDF directly.
                    async with async_timeout.timeout(24000):  # cancellation support
                        async with session.get(
                            url, headers=headers, ssl=False
                        ) as pdf_response:
                            status = pdf_response.status
                            if pdf_response.status == 200:
                                file_name = await DOIDownloader._init_file_name(title)
                                file_path = os.path.join(save_dir, file_name)
                                # Re-check revocation before writing to disk.
                                if check_revoked and await check_revoked(task_id):
                                    print(
                                        f"任务已被撤销，停止从 Sci-Hub(base) 下载 DOI: {doi}"
                                    )
                                    return False, None, "任务已被撤销", url, status

                                async with aiofiles.open(file_path, "wb") as f:
                                    await f.write(await pdf_response.read())

                                print(f"成功从Sci-Hub下载: {file_path}")
                                return True, file_path, None, url, status
                            else:
                                print(f"下载失败{pdf_response.status}：{url}")

                except Exception as e:
                    # One mirror failing is expected; try the next one.
                    print(f"尝试从 {domain} 下载失败: {str(e)}")
                    continue

            return False, None, "无法从任何Sci-Hub(base)镜像站点下载PDF", url, status

        except Exception as e:
            return False, None, f"Sci-Hub(base)下载出错: {str(e)}", url, status

    @staticmethod
    async def _try_scihub_method(
        session: aiohttp.ClientSession,
        task_id: str,
        doi: str,
        save_dir: str,
        title: str,
        check_revoked=None,
    ) -> tuple[bool, Optional[str], Optional[str], str, int]:
        """Download a PDF via Sci-Hub landing pages (``settings.SCIHUB_DOMAINS``).

        Each mirror's HTML page is scraped for an ``<embed>`` element whose
        ``src`` points at the PDF.

        Returns ``(success, file_path, error, last_url, last_http_status)``.
        """
        # Initialised before the try so the outer except can reference them.
        url = ""
        status = 0
        try:
            for domain in settings.SCIHUB_DOMAINS:
                # Abort early if the task has been revoked.
                if check_revoked and await check_revoked(task_id):
                    print(f"任务已被撤销，停止从 Sci-Hub 下载 DOI: {doi}")
                    # Fix: this used to return a 4-tuple, breaking callers
                    # that unpack five values.
                    return False, None, "任务已被撤销", url, status

                try:
                    domain = domain.removesuffix("/")
                    print(f"尝试从 {domain}/{doi}")
                    url = f"{domain}/{doi}"

                    # Fetch the Sci-Hub landing page for this DOI.
                    async with async_timeout.timeout(24000):  # cancellation support
                        async with session.get(
                            url, headers=headers, ssl=False
                        ) as response:
                            status = response.status

                            if response.status != 200:
                                continue

                            # Look for the embedded PDF viewer.
                            html_content = await response.text()
                            soup = BeautifulSoup(html_content, "html.parser")
                            pdf_embed = soup.find("embed")
                            if pdf_embed and pdf_embed.get("src"):
                                pdf_url = pdf_embed["src"]
                                # Normalise protocol-relative / site-relative
                                # links into absolute URLs.
                                if not pdf_url.startswith("http"):
                                    pdf_url = (
                                        "https:" + pdf_url
                                        if pdf_url.startswith("//")
                                        else domain + "/" + pdf_url.lstrip("/")
                                    )

                                print(f"找到PDF链接: {pdf_url}")

                                # Fetch the PDF itself.
                                async with async_timeout.timeout(24000):
                                    async with session.get(
                                        pdf_url, headers=headers, ssl=False
                                    ) as pdf_response:
                                        status = pdf_response.status
                                        if pdf_response.status == 200:
                                            file_name = (
                                                await DOIDownloader._init_file_name(
                                                    title
                                                )
                                            )
                                            file_path = os.path.join(
                                                save_dir, file_name
                                            )
                                            # Re-check revocation before
                                            # writing to disk.
                                            if check_revoked and await check_revoked(
                                                task_id
                                            ):
                                                print(
                                                    f"任务已被撤销，停止从 Sci-Hub 下载 DOI: {doi}"
                                                )
                                                return (
                                                    False,
                                                    None,
                                                    "任务已被撤销",
                                                    url,
                                                    status,
                                                )

                                            async with aiofiles.open(
                                                file_path, "wb"
                                            ) as f:
                                                await f.write(await pdf_response.read())

                                            print(f"成功从Sci-Hub下载: {file_path}")
                                            return True, file_path, None, url, status
                                        else:
                                            print(
                                                f"下载失败{pdf_response.status}：{pdf_url}"
                                            )

                except Exception as e:
                    # One mirror failing is expected; try the next one.
                    print(f"尝试从 {domain} 下载失败: {str(e)}")
                    continue

            return False, None, "无法从任何Sci-Hub镜像站点下载PDF", url, status

        except Exception as e:
            return False, None, f"Sci-Hub下载出错: {str(e)}", url, status

    @staticmethod
    async def _init_file_name(title: str) -> str:
        """Build a filesystem-safe ``<title>.pdf`` file name.

        Drops the "Sci-Hub | " page-title prefix and replaces every
        character that is illegal on common filesystems with ``_``.
        """
        title = title.replace("Sci-Hub | ", "")
        # Single compiled regex instead of nine chained str.replace calls.
        title = DOIDownloader._INVALID_FILENAME_CHARS.sub("_", title)
        return f"{title}.pdf"


class JiangSuDownloader:
    """Downloader/parser for JiangSu MPA registration-table pages.

    Third-party annotations are written as strings (PEP 484 forward
    references) so the class can be defined even when aiohttp is not
    importable at annotation-evaluation time.
    """

    @staticmethod
    async def download_table(
        session: "aiohttp.ClientSession",
        task_id: str,
        href: str,
        check_revoked=None,
    ) -> tuple[bool, Optional[str], Dict[str, Any]]:
        """Fetch the page at *href* and parse the registration table in it.

        Args:
            session: Shared aiohttp session.
            task_id: Task identifier used for revocation checks.
            href: Page URL to fetch.
            check_revoked: Optional async callable; truthy result aborts.

        Returns:
            (success, error_message, parsed_data) — error_message is None on
            success; parsed_data is {} on failure.
        """
        try:
            # Very large timeout on purpose: async_timeout is used here
            # mainly so the request participates in task cancellation.
            async with async_timeout.timeout(24000):
                async with session.get(href, headers=headers, ssl=False) as response:
                    # Bail out as early as possible if the task was revoked.
                    if check_revoked and await check_revoked(task_id):
                        print(f"任务已被撤销，停止下载 href: {href}")
                        return False, "任务已被撤销", {}
                    if response.status == 200:
                        content = await response.text()
                        data = JiangSuDownloader.parse(content, href)
                        return True, None, data
                    return False, f"下载出错:状态码：{response.status}", {}
        except Exception as e:
            return False, f"下载出错: {str(e)}", {}

    @staticmethod
    def parse(html_content, base_url):
        """Parse the '#zoom' registration table out of *html_content*.

        Args:
            html_content: Raw HTML of the page.
            base_url: Page URL, used to resolve relative report links.

        Returns:
            {"title": <page title>, "items": [row dicts]} — rows that fail
            to parse (merged cells, missing <p>) are skipped silently.
        """
        soup = BeautifulSoup(html_content, "html.parser")

        # First <tr> is the header row; skip it.
        rows = soup.select_one("#zoom table").find_all("tr")[1:]
        items = []
        for row in rows:
            try:
                product_name = row.select_one("td:nth-of-type(2) p").text.strip()
                register_name = row.select_one("td:nth-of-type(3) p").text.strip()
                register_number = row.select_one("td:nth-of-type(4) p").text.strip()
                register_city = row.select_one("td:nth-of-type(5) p").text.strip()
                # The technical-report link may be relative; resolve it
                # against the page URL.
                tech_report_a = row.select_one("td:nth-of-type(6) p a")
                technical_report_link = tech_report_a["href"] if tech_report_a else ""

                if not JiangSuDownloader.is_full_url(technical_report_link):
                    technical_report_link = urljoin(base_url, technical_report_link)

                items.append(
                    {
                        "product_name": product_name,
                        "register_name": register_name,
                        "register_number": register_number,
                        "register_city": register_city,
                        "technical_report_link": technical_report_link,
                    }
                )
            except Exception:
                # Best effort: malformed rows are ignored.
                pass

        return {"title": soup.find("title").text.strip(), "items": items}

    @staticmethod
    def is_full_url(url: str) -> bool:
        """Return True when *url* is already absolute (http:// or https://)."""
        return url.startswith(("http://", "https://"))

    @staticmethod
    async def dowload_file(
        session: "aiohttp.ClientSession",
        task_id: str,
        link: str,
        title: str,
        save_dir: str,
        check_revoked=None,
    ) -> tuple[bool, Optional[str], Optional[str], str, Optional[int]]:
        """Download *link* into *save_dir* under a sanitized *title* name.

        NOTE: the method name keeps its original typo ("dowload") for
        backward compatibility with existing callers.

        Returns:
            (success, file_path, error_message, link, http_status);
            http_status is None when the request itself failed.
        """
        # Fix: initialize status so the except-path return below cannot
        # raise UnboundLocalError when session.get() itself fails.
        status: Optional[int] = None
        try:
            async with async_timeout.timeout(24000):
                async with session.get(
                    link, headers=headers, ssl=False
                ) as pdf_response:
                    status = pdf_response.status
                    if pdf_response.status == 200:
                        file_name = await DOIDownloader._init_file_name(title)
                        file_path = os.path.join(save_dir, file_name)
                        # Check for revocation before touching the disk.
                        if check_revoked and await check_revoked(task_id):
                            print(f"任务已被撤销，停止下载 link: {link}")
                            return False, None, "任务已被撤销", link, status

                        async with aiofiles.open(file_path, "wb") as f:
                            await f.write(await pdf_response.read())

                        print(f"成功下载: {file_path}")
                        return True, file_path, None, link, status
                    else:
                        print(f"下载失败{pdf_response.status}：{link}")
                        return False, None, "无法下载", link, status
        except Exception as e:
            print(f"下载失败: {str(e)}")
            return False, None, "无法下载", link, status


class NMPADownloader:

    @staticmethod
    async def _input_search_go_to(page: Page, search_keyword: str, input_select: str):
        """Clear the search box matched by *input_select*, type the keyword,
        then click the search button to submit the query."""
        box = await page.wait_for_selector(input_select)
        # Wipe any previously-entered query before typing the new one.
        await box.evaluate('(element) => element.value = ""')
        await box.type(search_keyword)
        # Fire the search.
        button = await page.wait_for_selector(".search-input button")
        await button.click()

    @staticmethod
    async def _get_total_count(page: Page) -> int:
        """Return the total result count shown in the element-UI pager."""
        # Make sure the ".el-pagination" pager has rendered first.
        await page.wait_for_selector(".el-pagination")
        total_span = await page.wait_for_selector(".el-pagination__total")
        label = await total_span.inner_text()
        # Pull the first run of digits out of the label text.
        return int(re.findall(r"\d+", label)[0])

    @staticmethod
    async def _close_a_tag(page: Page, is_loop: bool = False):
        """Dismiss the popup by clicking its "关闭" (close) link, if present.

        When called from a loop iteration the page is already cached, so the
        popup never appears and the wait is skipped entirely.
        """
        if is_loop:
            return
        try:
            close_link = await page.wait_for_selector("text=关闭")
            if close_link:
                await close_link.click()
        except Exception:
            # Best effort: no popup means there is nothing to close.
            pass

    @staticmethod
    async def download_total_count(
        page: Page,  # Playwright page used to drive the search
        task_id: str,
        url: str,
        search_keyword: str,
        category: str,
        check_revoked=None,
    ) -> tuple[bool, int, Page, int]:
        """Open the NMPA search site, run *search_keyword* under *category*
        and read the total result count from the results tab.

        Returns:
            (True, 1, result_page, total) on success;
            (True, 0, None, 0) when the task was revoked (page is closed).
        """

        async def revoked() -> bool:
            # Close the tab as soon as the task has been cancelled.
            if check_revoked and await check_revoked(task_id):
                await page.close()
                return True
            return False

        if await revoked():
            return True, 0, None, 0
        await page.goto(url)
        print(f"开始获取总数{search_keyword}")
        await NMPADownloader._close_a_tag(page, False)

        if await revoked():
            return True, 0, None, 0

        # Click the category tab, e.g. "境内医疗器械（备案）".
        category_link = await page.wait_for_selector(f'a[title="{category}"]')
        await category_link.click()

        # Submitting the search opens the results in a new tab; the input
        # lives somewhere under ".search-input", not necessarily a direct
        # child. Capture the popup while the search is triggered.
        async with page.context.expect_page() as popup_info:
            await NMPADownloader._input_search_go_to(
                page, search_keyword, ".search-input input"
            )

        if await revoked():
            return True, 0, None, 0

        result_page = await popup_info.value  # type: Page
        await result_page.wait_for_load_state()

        if await revoked():
            return True, 0, None, 0

        total = await NMPADownloader._get_total_count(result_page)
        print(f"查询结果数量: {total}")
        return True, 1, result_page, total

    @staticmethod
    async def download_table(
        page: Page,  # Playwright page already positioned on the results UI
        task_id: str,
        search_keyword: str,
        page_index: int = 1,
        category_int: int = 0,
        is_single_page: bool = False,
        is_loop: bool = False,
        year_total_count: int = 0,
        check_revoked=None,
    ) -> tuple[bool, int]:
        """Run *search_keyword* on the NMPA results page and walk every page.

        For each result row the last-column detail popup is opened and
        handed to ``_fetch_detail``; per-page progress and failures are
        recorded in ``NMPDErrorDB`` (error_type "分页错误", status 0 = in
        progress, 1 = done) so an interrupted run can resume at
        ``page_index``.

        Args:
            page: Playwright page showing the NMPA search UI.
            task_id: Task identifier (currently unused in this method).
            search_keyword: Keyword typed into the ``.sousuo`` search box.
            page_index: 1-based page to start/resume from.
            category_int: Category code persisted with every DB record.
            is_single_page: Stop after processing a single result page.
            is_loop: Passed to ``_close_a_tag``; True skips the popup wait.
            year_total_count: Per-year total persisted with progress rows.
            check_revoked: Unused here; kept for signature parity with the
                other downloaders.

        Returns:
            (True, total_count) — total_count is the count reported by the
            pagination bar, or 0 when the query had no results.
        """
        # Normalize the resume page.
        if page_index < 1:
            page_index = 1

        print(f"开始查询表格{search_keyword}")
        await NMPADownloader._close_a_tag(page, is_loop)

        print(f"开始查询{search_keyword}")
        try:
            # Snapshot the current last-row text so wait_content_change can
            # detect when the table has re-rendered with new results.
            # NOTE(review): if this lookup times out, old_text stays unbound
            # and the wait_content_change call below raises NameError —
            # confirm whether that path can occur in practice.
            old_text = await page.text_content(
                '.search-result-table table[class="el-table__body"] tr:last-child td:nth-child(2)',
                timeout=2000,
            )
        except Exception as e:
            print("查询结果数量获取失败")
            pass

        try:

            await NMPADownloader._input_search_go_to(
                page, search_keyword, ".sousuo input"
            )
            await page.wait_for_load_state()

            # An ".el-table__empty-text" node means the query had no hits.
            empty_text = await page.query_selector(".el-table__empty-text")
            if empty_text:
                print(f"查询条件{search_keyword}没有结果")
                return True, 0
        except Exception as e:
            print(f"查询条件{search_keyword}查询错误：{str(e)}")
            raise e

        # Wait until the table content differs from the pre-search snapshot.
        await NMPADownloader.wait_content_change(
            page,
            page_index,
            '.search-result-table table[class="el-table__body"] tr:last-child td:nth-child(2)',
            old_text,
        )
        total_count = await NMPADownloader._get_total_count(page)
        first_page_text = await page.text_content(
            '.search-result-table table[class="el-table__body"] tr:last-child td:nth-child(2)'
        )
        # Read the highest page number from the pager.
        pagination_last = await page.wait_for_selector(
            '.el-pagination ul[class="el-pager"] li:last-child'
        )
        pagination_last_text = await pagination_last.inner_text()
        last = int(pagination_last_text)
        print(f"总页数{last}")
        curr_page_text = page_index
        # Serializes the per-page work (acts as a re-entrancy guard).
        start_semaphore = asyncio.Semaphore(1)

        while total_count > 0:
            try:
                async with start_semaphore:
                    jump_input = await page.wait_for_selector(
                        ".el-pagination__jump input"
                    )
                    c_page = await jump_input.input_value()
                    print(f"当前页：{c_page}")

                    # Record this page as "in progress" (status 0) so a crash
                    # here can be retried later.
                    db_service.save_entity_by_filter(
                        NMPDErrorDB,
                        {
                            "error_type": "分页错误",
                            "search_keyword": search_keyword,
                            "page": str(curr_page_text),
                            "category": category_int,
                            "year_total": year_total_count,
                            "status": 0,
                        },
                        {
                            "error_type": "分页错误",
                            "search_keyword": search_keyword,
                            "page": str(curr_page_text),
                            "category": category_int,
                        },
                    )

                    if c_page != "1":
                        # Not on page 1: wait until the table content diverges
                        # from the first page's snapshot.
                        try:
                            await NMPADownloader.wait_content_change(
                                page,
                                curr_page_text,
                                '.search-result-table table[class="el-table__body"] tr:last-child td:nth-child(2)',
                                first_page_text,
                            )
                        except Exception as e:
                            print(
                                f"1查询条件{search_keyword}分页错误 {curr_page_text}页，错误：{str(e)}"
                            )

                            await asyncio.sleep(10)
                            continue

                    try:
                        old_text = await page.text_content(
                            '.search-result-table table[class="el-table__body"] tr:last-child td:nth-child(2)'
                        )
                        if page_index > 1:
                            # Resuming: jump straight to page_index via the
                            # pager's "go to" input.
                            goto_input = await page.wait_for_selector(
                                ".el-pagination__jump input"
                            )
                            await goto_input.evaluate('(element) => element.value = ""')
                            await goto_input.type(str(page_index))
                            await goto_input.press("Enter")
                            print(f"直接跳转{page_index}页")
                            page_index = 1  # reset so we only jump once
                            await page.wait_for_load_state("networkidle")
                            await NMPADownloader.wait_content_change(
                                page,
                                curr_page_text,
                                '.search-result-table table[class="el-table__body"] tr:last-child td:nth-child(2)',
                                old_text,
                            )

                        # The cell element can exist while inner_text() is
                        # still empty — wait until it has real content.
                        await page.wait_for_function(
                            """() => {
                                               const el = document.querySelector('table[class="el-table__body"] tr td:nth-child(2)');
                                                return el && el.textContent.trim().length > 0;  }"""
                        )
                    except Exception as e:
                        print(
                            f"2查询条件{search_keyword}分页错误 {curr_page_text}页，错误：{str(e)}"
                        )
                        await asyncio.sleep(10)
                        continue

                    # Wait for the results table inside ".search-result-table".
                    results_table = await page.wait_for_selector(
                        '.search-result-table table[class="el-table__body"]'
                    )
                    # Read the active page number from the pager.
                    curr_page = await page.query_selector(
                        '.el-pagination ul[class="el-pager"] li[class="number active"]'
                    )
                    curr_page_ele = await curr_page.inner_text()
                    curr_page_text = int(curr_page_ele)

                    # Extract the registration-number column row by row.
                    results_rows = await results_table.query_selector_all("tbody > tr")
                    if not results_rows or len(results_rows) == 0:
                        break

                    batch_size = 5
                    registration_numbers = []
                    semaphore = asyncio.Semaphore(1)
                    for i in range(0, len(results_rows), batch_size):
                        # Process the rows in batches of batch_size.
                        tasks = []
                        batch = results_rows[i : i + batch_size]
                        async with semaphore:
                            for row in batch:
                                # Second column holds the registration number.
                                column = await row.query_selector("td:nth-child(2)")
                                registration_number = await column.inner_text()
                                registration_numbers.append(registration_number)
                                has = db_service.query_entities(
                                    NMPDErrorDB,
                                    {
                                        "code": registration_number,
                                        "page": str(curr_page_text),
                                    },
                                )
                                if has and len(has) > 0 and has[0].url is not None:
                                    # Already fetched: refresh the record and
                                    # skip opening the detail page again.
                                    db_service.save_entity_by_filter(
                                        NMPDErrorDB,
                                        {
                                            "page": str(curr_page_text),
                                        },
                                        {
                                            "code": registration_number,
                                            "page": str(curr_page_text),
                                        },
                                    )
                                    continue
                                # The detail link lives in the last column (the
                                # column count varies); clicking it opens a new
                                # tab that expect_page captures.
                                last_column = await row.query_selector("td:last-child")
                                async with page.context.expect_page() as page_info:
                                    await last_column.click()

                                tasks.append(
                                    NMPADownloader._fetch_detail(
                                        page_info.value,
                                        registration_number,
                                        search_keyword,
                                        category_int,
                                        curr_page_text,
                                    )
                                )
                            if len(tasks) > 0:
                                # Wait for every detail fetch in this batch.
                                await asyncio.gather(*tasks)

                    print(registration_numbers)
                    print("+++++++++++++++++++++++++++++++++++")

                    # Mark this page as done (status 1).
                    db_service.save_entity_by_filter(
                        NMPDErrorDB,
                        {
                            "error_type": "分页错误",
                            "search_keyword": search_keyword,
                            "page": str(curr_page_text),
                            "category": category_int,
                            "status": 1,
                        },
                        {
                            "error_type": "分页错误",
                            "search_keyword": search_keyword,
                            "page": str(curr_page_text),
                            "category": category_int,
                        },
                    )

                    if is_single_page:
                        break

                    try:
                        # "Next page" button.
                        next_button = await page.wait_for_selector(
                            '.el-pagination button[class="btn-next"]'
                        )
                        # A disabled next button means we hit the last page.
                        disabled = await next_button.get_attribute("disabled")

                        if disabled == "disabled":
                            print(
                                f"查询条件{search_keyword}到了最后一页{curr_page_text}"
                            )
                            break

                        await next_button.click()

                        # Wait for the next page's request to complete.
                        print(f"查询条件{search_keyword}进入第{curr_page_text+1}页")
                        await page.wait_for_load_state("networkidle")
                        await NMPADownloader.wait_content_change(
                            page,
                            curr_page_text + 1,
                            '.search-result-table table[class="el-table__body"] tr:last-child td:nth-child(2)',
                            old_text,
                        )
                        # Next page finished loading.
                        print(f"查询条件{search_keyword}第{curr_page_text+1}页加载完成")
                    except Exception as e:
                        print(
                            f"3查询条件{search_keyword}，翻页超时 {curr_page_text+1}页"
                        )
                        page_index = curr_page_text + 1  # retry this page
                        # Re-run the search, record the failure, then back off.
                        await NMPADownloader._input_search_go_to(
                            page, search_keyword, ".sousuo input"
                        )
                        db_service.save_entity_by_filter(
                            NMPDErrorDB,
                            {
                                "error_type": "分页错误",
                                "search_keyword": search_keyword,
                                "page": str(curr_page_text + 1),
                                "category": category_int,
                                "status": 0,
                            },
                            {
                                "error_type": "分页错误",
                                "search_keyword": search_keyword,
                                "page": str(curr_page_text + 1),
                                "category": category_int,
                            },
                        )
                        curr_page_text += 1
                        await asyncio.sleep(10)

            except Exception as e:
                print(
                    f"4查询条件{search_keyword}分页错误 {curr_page_text}页，错误：{str(e)}"
                )
                page_index = curr_page_text  # retry this page

                await NMPADownloader._input_search_go_to(
                    page, search_keyword, ".sousuo input"
                )
                db_service.save_entity_by_filter(
                    NMPDErrorDB,
                    {
                        "error_type": "分页错误",
                        "search_keyword": search_keyword,
                        "page": str(curr_page_text + 1),
                        "category": category_int,
                        "status": 0,
                    },
                    {
                        "error_type": "分页错误",
                        "search_keyword": search_keyword,
                        "page": str(curr_page_text + 1),
                        "category": category_int,
                    },
                )
                await asyncio.sleep(10)

        return True, total_count

    @staticmethod  # Fix: was missing; the method takes no self/cls.
    async def download_table_2(
        page: Page,
        task_id: str,
        search_keyword: str,
        page_index: int = 1,
        category_int: int = 0,
        is_single_page: bool = False,
        is_loop: bool = False,
        year_total_count: int = 0,
        check_revoked=None,
    ):
            """Search via the NMPA JSON endpoints and record detail URLs.

            Unlike ``download_table``, this variant does not click into each
            detail popup. It intercepts the site's config/search JSON
            responses, builds the detail URL for every result row and stores
            it in ``NMPDErrorDB`` for ``download_detail_tasks`` to consume.
            Per-page progress rows (error_type "分页错误") are kept so an
            interrupted run can resume from ``page_index``.

            Returns:
                (True, 0) when the task was revoked (page closed);
                (True, 1) after the last page was processed.

            Raises:
                Exception: with a context dict (error_type "page_error") when
                a page's list response fails or is non-200, so the caller can
                retry from ``page_index``.
            """
            if check_revoked and await check_revoked(task_id):
                await page.close()
                return True, 0
            print(f"开始查询表格:{search_keyword},按年数量：{year_total_count}")
            await NMPADownloader._close_a_tag(page, is_loop)
            print(f"开始查询:{search_keyword}")
            if page_index < 1:
                page_index = 1

            # Serializes the per-page work (acts as a re-entrancy guard).
            start_semaphore = asyncio.Semaphore(1)
            curr_page_index = page_index
            # Arm both response interceptors BEFORE triggering the search:
            # one for the category config JSON, one for the result-list JSON.
            category_resp = page.expect_response(
                url_or_predicate=re.compile(
                    r"https://www\.nmpa\.gov\.cn/datasearch/config/(?!DATE\.json)[^/]+\.json\?7QBHXKaZ=.+"
                ),
                timeout=10000,
            )
            search_list_resp = page.expect_response(
                url_or_predicate=re.compile(
                    r"https://www\.nmpa\.gov\.cn/datasearch/data/nmpadata/search\?7QBHXKaZ=.+"
                ),
                timeout=10000,
            )
            if check_revoked and await check_revoked(task_id):
                await page.close()
                return True, 0

            await NMPADownloader._input_search_go_to(page, search_keyword, ".sousuo input")
            async with category_resp as resp_1:
                response1 = await resp_1.value
                print("config响应url：", response1.url)
                print("config响应状态：", response1.status)
                print("config响应内容：", await response1.json())
                category_data = await response1.json()
                itemId = category_data["itemId"]

            while True:
                async with start_semaphore:
                    where = {
                        "error_type": "分页错误",
                        "search_keyword": search_keyword,
                        "page": str(curr_page_index),
                        "category": category_int,
                    }
                    if check_revoked and await check_revoked(task_id):
                        await page.close()
                        return True, 0
                    # Mark this page as in-progress (status 0).
                    db_service.save_entity_by_filter(
                        NMPDErrorDB,
                        {
                            "error_type": "分页错误",
                            "search_keyword": search_keyword,
                            "page": str(curr_page_index),
                            "category": category_int,
                            "year_total": year_total_count,
                            "status": 0,
                        },
                        where,
                    )

                    if page_index > 1:
                        # Resuming: re-arm the list interceptor, then jump
                        # straight to the target page via the pager input.
                        search_list_resp = page.expect_response(
                            url_or_predicate=re.compile(
                                r"https://www\.nmpa\.gov\.cn/datasearch/data/nmpadata/search\?7QBHXKaZ=.+"
                            ),
                            timeout=10000,
                        )

                        await NMPADownloader._goto_for_page(page, curr_page_index)
                        print(f"查询条件:{search_keyword}直接跳转到第{curr_page_index}页")
                        page_index = 1
                    if check_revoked and await check_revoked(task_id):
                        await page.close()
                        return True, 0
                    try:
                        async with search_list_resp as resp_2:
                            response2 = await resp_2.value
                            print("list响应url：", response2.url)
                            print("list响应状态：", response2.status)

                            if response2.status != 200:
                                # Back off briefly, then raise so the caller
                                # can retry this page.
                                print("返回错误，等待几秒继续")
                                page_index = curr_page_index
                                await asyncio.sleep(5)
                                raise Exception(
                                    {
                                        "task_id": task_id,
                                        "search_keyword": search_keyword,
                                        "page_index": page_index,
                                        "category_int": category_int,
                                        "year_total_count": year_total_count,
                                        "error_type": "page_error",
                                        "msg": "翻页时错误",
                                    }
                                )

                            search_list_data = await response2.json()
                            code = search_list_data["code"]
                            msg = search_list_data["message"]
                            total_count = search_list_data["data"]["total"]
                            page_num = search_list_data["data"]["pageNum"]
                            curr_page_index = page_num
                            print(
                                f"查询条件:{search_keyword} 第{curr_page_index}页，返回页码:{page_num}，共{total_count}条，返回码:{code}，msg:{msg}"
                            )
                            # NOTE(review): curr_page_index was just assigned
                            # from page_num, so this condition is always true
                            # — confirm whether it was meant to compare the
                            # requested page instead.
                            if page_num == curr_page_index:
                                list_data = search_list_data["data"]["list"]
                                for item in list_data:
                                    keys = list(item.keys())
                                    registration_number = item[keys[0]]
                                    record_id = item[keys[-1]]  # renamed from "id" (shadowed builtin)
                                    # Detail pages take a base64-encoded
                                    # "itemId=...&id=..." query string.
                                    base64_str = f"itemId={itemId}&id={record_id}"
                                    base64_bytes = base64_str.encode("utf-8")
                                    query = base64.b64encode(base64_bytes).decode("utf-8")
                                    detail_url = f"https://www.nmpa.gov.cn/datasearch/search-info.html?nmpa={query}"
                                    print(f"{registration_number}:{detail_url}")
                                    db_service.save_entity_by_filter(
                                        NMPDErrorDB,
                                        {
                                            "code": registration_number,
                                            "error_type": "详情错误",
                                            "category": category_int,
                                            "url": detail_url,
                                            "search_keyword": search_keyword,
                                            "page": str(curr_page_index),
                                            "nmpd_id": record_id,
                                        },
                                        {"code": registration_number, "nmpd_id": record_id},
                                    )

                    except Exception as e:
                        print(
                            f"查询条件:{search_keyword}第{curr_page_index}页，等待列表返回错误：{str(e)}"
                        )
                        page_index = curr_page_index
                        raise Exception(
                            {
                                "task_id": task_id,
                                "search_keyword": search_keyword,
                                "page_index": page_index,
                                "category_int": category_int,
                                "year_total_count": year_total_count,
                                "error_type": "page_error",
                                "msg": "翻页时错误",
                            }
                        )

                    # Mark the page as done (status 1).
                    db_service.save_entity_by_filter(
                        NMPDErrorDB,
                        {
                            "status": 1,
                        },
                        where,
                    )

                    if check_revoked and await check_revoked(task_id):
                        return True, 0

                    if is_single_page:
                        break
                    await asyncio.sleep(0.4)
                    # Re-arm the list interceptor before clicking "next".
                    search_list_resp = page.expect_response(
                        url_or_predicate=re.compile(
                            r"https://www\.nmpa\.gov\.cn/datasearch/data/nmpadata/search\?7QBHXKaZ=.+"
                        ),
                        timeout=10000,
                    )
                    if check_revoked and await check_revoked(task_id):
                        await page.close()
                        return True, 0
                    is_next_page = await NMPADownloader._next_page(page)
                    if not is_next_page:
                        break

            return True, 1
 
    
    @staticmethod
    async def download_detail_tasks(
        context: BrowserContext,
        url: str = None,
        task_id: str = None,
        check_revoked=None,
    ):
        """Drain pending detail-page records from NMPDErrorDB in batches of 5.

        When *url* is given, only the record with that URL is processed;
        otherwise all unfinished records (status 0, error_type "详情错误")
        are fetched, ordered by keyword and numeric page.

        Args:
            context: Playwright browser context used to open detail pages.
            url: Optional single detail URL to (re)process.
            task_id: Task identifier used for revocation checks.
            check_revoked: Optional async callable; truthy result aborts.
        """
        try:
            print("开始下载详情")
            where = {"status": 0, "error_type": "详情错误"}
            if url:
                where = {"url": url}
            semaphore = asyncio.Semaphore(1)
            while True:
                async with semaphore:
                    # Next batch of 5 unfinished records. (Renamed from
                    # "list", which shadowed the builtin.)
                    pending = db_service.query_entities_as_dicts(
                        NMPDErrorDB,
                        where,
                        5,
                        0,
                        [("search_keyword", "asc"), ("page::int", "asc")],
                    )
                    if not pending:
                        break
                    if check_revoked and await check_revoked(task_id):
                        return True, 0
                    tasks = [
                        NMPADownloader.dowload_detail(context, item)
                        for item in pending
                    ]
                    await asyncio.gather(*tasks)
                    # Brief pause between batches.
                    await asyncio.sleep(0.5)
        except Exception as e:
            print(f"详情页下载任务错误：{str(e)}")
            
    
    
    @staticmethod
    async def dowload_detail(context: BrowserContext, item, task_id: str = None, check_revoked=None):
        """Open one NMPA detail page, capture its config and detail JSON
        responses, and persist the parsed field/value pairs.

        Args:
            context: Playwright browser context to open the page in.
            item: dict-like error record with "code", "url", "search_keyword",
                "category" and "nmpd_id" keys.
            task_id: identifier passed to ``check_revoked``.
            check_revoked: optional async predicate for early cancellation.

        Returns:
            (True, info) on success, (False, None) on failure or revocation.
        """
        # Bound before the try body so the finally clause never hits a
        # NameError when context.new_page() itself fails.
        page = None
        try:
            print(item["code"])
            print(item["url"])
            page = await context.new_page()
            # Default timeout for page operations: 5 seconds.
            page.set_default_timeout(5000)
            info = {
                "code": item["code"],
                "content_json": [],
                "search_keyword": item["search_keyword"],
                "category": item["category"],
                "nmpd_id": item["nmpd_id"],
            }
            # The config JSON describes which detail fields exist; DATE.json
            # is explicitly excluded by the negative lookahead.
            category_resp = page.expect_response(
                url_or_predicate=re.compile(
                    r"https://www\.nmpa\.gov\.cn/datasearch/config/(?!DATE\.json)[^/]+\.json\?7QBHXKaZ=.+"
                ),
                timeout=10000,
            )
            search_detail_resp = page.expect_response(
                url_or_predicate=re.compile(
                    r"https://www\.nmpa\.gov\.cn/datasearch/data/nmpadata/queryDetail\?7QBHXKaZ=.+"
                ),
                timeout=10000,
            )
            if check_revoked and await check_revoked(task_id):
                return False, None
            await page.goto(item["url"])
            async with category_resp as resp_1:
                response1 = await resp_1.value
                print("config响应url：", response1.url)
                print("config响应状态：", response1.status)
                print("\n")
                category_data = await response1.json()
                # itemId is currently unused, but the lookup keeps the
                # original KeyError behavior when the config is malformed.
                itemId = category_data["itemId"]
                detailFeild = category_data["detailFeild"]
            if check_revoked and await check_revoked(task_id):
                return False, None

            async with search_detail_resp as resp_2:
                response2 = await resp_2.value
                print("detail响应url：", response2.url)
                print("detail响应状态：", response2.status)
                print("\n")
                if response2.status != 200:
                    # Non-200 still falls through: the json()/key access below
                    # will raise and be handled by the except block. Log it so
                    # the failure mode is visible instead of silently passed.
                    print(f"detail响应状态异常：{response2.status}")

                resp_2_data = await response2.json()

                detail_data = resp_2_data["data"]["detail"]
                # Map each configured field alias to its display name.
                for key in detailFeild:
                    field = key["desc"]
                    value = detail_data[key["alias"]]
                    info["content_json"].append({"key": field, "value": value})

            db_service.save_entity_by_filter(NMPDDB, info, {"code": item["code"], "nmpd_id": item["nmpd_id"]})
            # Mark the error record as resolved.
            db_service.save_entity_by_filter(NMPDErrorDB, {"status": 1}, {"code": item["code"], "nmpd_id": item["nmpd_id"]})
            return True, info
        except Exception as e:
            print(
                f"详情错误：{item['code']}，错误信息：{str(e)}，url:{item['url']}"
            )
            if check_revoked and await check_revoked(task_id):
                return False, None
            # Back off for 30 seconds before the caller retries this record.
            await asyncio.sleep(30)
            return False, None
        finally:
            # Single close point; early returns above rely on this.
            if page is not None:
                await page.close()

    @staticmethod
    async def _fetch_detail(
        page_promise,
        registration_numbers: str,
        search_keyword: str,
        category_int: int,
        page_index: int,
    ) -> tuple[bool, dict[str, Any]]:
        """Await a popup/page promise for one registration record and store
        its final detail URL as a pending "详情错误" row for the retry queue.

        Args:
            page_promise: awaitable resolving to the newly opened Page.
            registration_numbers: record code used as the DB key.
            search_keyword: keyword the record was found under.
            category_int: numeric category of the record.
            page_index: 1-based result page the record came from.

        Returns:
            (True, saved row dict) when the URL was captured,
            (False, error row dict) when the page never became ready.
        """
        # Bound before the try body so the finally clause never hits a
        # NameError when awaiting page_promise itself fails.
        new_page = None
        try:
            new_page = await page_promise  # type: Page
            url = await NMPADownloader.wait_until_page_url_ready(new_page)
            print(f"[{page_index}][{registration_numbers}] URL: {url}")
            row = {
                "code": registration_numbers,
                "error_type": "详情错误",
                "category": category_int,
                "url": url,
                "search_keyword": search_keyword,
                "page": str(page_index),
            }
            db_service.save_entity_by_filter(
                NMPDErrorDB,
                row,
                {"code": registration_numbers, "url": url},
            )
            # Report success explicitly (previously fell through to None,
            # contradicting the declared return type).
            return True, row

        except Exception as e:
            print(f"详情链接超时 {registration_numbers} ,error:{str(e)}")
            db_service.save_entity_by_filter(
                NMPDErrorDB,
                {
                    "code": registration_numbers,
                    "error_type": "详情错误",
                    "category": category_int,
                    "search_keyword": search_keyword,
                    # Stored as str for consistency with the success path and
                    # with the str(page_index) filter below.
                    "page": str(page_index),
                },
                {"code": registration_numbers, "page": str(page_index)},
            )
            return False, {
                "code": registration_numbers,
                "error_type": "详情错误",
                "category": category_int,
            }
        finally:
            if new_page is not None:
                await new_page.close()

    async def wait_until_page_url_ready(page, timeout=10):
        """Poll until ``page.url`` has navigated away from ``about:blank``.

        Args:
            page: object exposing a ``url`` attribute (a Playwright Page).
            timeout: maximum seconds to wait before giving up.

        Returns:
            The page's final URL.

        Raises:
            TimeoutError: if the page is still on about:blank after `timeout`.
        """
        deadline = asyncio.get_event_loop().time() + timeout
        while page.url == "about:blank":
            # Poll every 100ms; the navigation is driven by the page itself.
            await asyncio.sleep(0.1)
            if asyncio.get_event_loop().time() > deadline:
                raise TimeoutError("等待页面跳转超时，仍是 about:blank")
        return page.url

    async def wait_content_change(
        page: Page, page_index: int, selector: str, old_text: str
    ):
        """Block until the element matched by `selector` no longer shows
        `old_text` (i.e. its content was re-rendered by the page's JS).

        Args:
            page: the Playwright page to watch.
            page_index: result-page number, used only for log prefixing.
            selector: CSS selector of the element being watched.
            old_text: previous textContent to compare against.

        Raises:
            Re-raises whatever wait_for_function raises (typically a 5-second
            timeout) after logging it.
        """
        changed_predicate = """([sel, old]) => {
        const el = document.querySelector(sel);
        return el && el.textContent !== old;
    }"""
        try:
            await page.wait_for_function(
                changed_predicate,
                arg=[selector, old_text],
                timeout=5000,  # wait at most 5 seconds
            )
        except Exception as err:
            print(f"[{page_index}]{str(err)}")
            raise err

    async def get_detail(page: Page, context):
        """Scrape the key/value detail table from an opened NMPA detail page,
        persist it to NMPDDB, and mark the matching error row as resolved.

        Args:
            page: the detail page, already navigated.
            context: dict carrying "code", "search_keyword" and "category"
                for the record being scraped.
        """
        record = {
            "code": context["code"],
            "content_json": [],
            "search_keyword": context["search_keyword"],
            "category": context["category"],
        }
        detail_table = await page.wait_for_selector("table", timeout=5000)

        for table_row in await detail_table.query_selector_all("tbody > tr"):
            # First column holds the field label.
            label_cell = await table_row.query_selector("td:nth-child(1)")
            label_text = await label_cell.inner_text()

            # The value column is filled in by JS after the table skeleton
            # renders: the element exists immediately but its text is empty,
            # so wait until the first value cell carries content.
            await page.wait_for_function(
                """() => {
        const el = document.querySelector('table tr td:nth-child(2)');
        return el && el.textContent.trim().length > 0;
    }""",
                timeout=5000,
            )
            value_cell = await table_row.query_selector("td:nth-child(2)")
            value_text = await value_cell.inner_text()
            record["content_json"].append(
                {"key": label_text, "value": value_text}
            )

        print(record)
        db_service.save_entity_by_filter(NMPDDB, record, {"code": context["code"]})
        db_service.save_entity_by_filter(
            NMPDErrorDB, {"status": 1}, {"code": context["code"]}
        )
        await page.close()

    @staticmethod
    async def _goto_for_page(page: Page, page_index: int):
        """Jump the Element-UI pagination widget to `page_index` by typing
        the number into the go-to input and pressing Enter."""
        jump_box = await page.wait_for_selector(".el-pagination__jump input")
        # Clear any existing value first, then type the target page number.
        await jump_box.evaluate('(element) => element.value = ""')
        await jump_box.type(str(page_index))
        await jump_box.press("Enter")

    @staticmethod
    async def _next_page(page: Page) -> bool:
        """Click the pagination "next page" button.

        Returns:
            True if the button was clicked (another page exists),
            False if the button is disabled or could not be found in time.
        """
        try:
            next_button = await page.wait_for_selector(
                '.el-pagination button[class="btn-next"]'
            )
            # HTML boolean attributes are true whenever present, regardless
            # of value ("" or "disabled"); the previous `== "disabled"` check
            # missed the empty-string form and would keep clicking a disabled
            # button.
            disabled = await next_button.get_attribute("disabled")
            if disabled is not None:
                return False
            await next_button.click()
            return True
        except Exception:
            # Pagination widget missing or timed out: treat as "no next page".
            return False
