import logging
import re
import time
from typing import Optional

import requests
from bs4 import BeautifulSoup
from playwright.sync_api import sync_playwright, TimeoutError as PlaywrightTimeoutError

from config import HEADERS

logger = logging.getLogger(__name__)


class PaperApi:
    """Fetch paper abstracts by DOI, trying sources in order of reliability.

    Order: Semantic Scholar Graph API -> Crossref REST API -> scraping the
    publisher page reached via the doi.org redirect with a headless
    Playwright Chromium browser.
    """

    def __init__(self):
        self.playwright = None
        self.browser = None
        self.context = None
        self.last_init_time = 0  # epoch seconds of the last successful Playwright init
        self.init_playwright()  # start the browser eagerly at construction time

    def __del__(self):
        # Best-effort cleanup on garbage collection; close_playwright()
        # swallows its own errors, so this cannot raise during interpreter
        # shutdown.
        self.close_playwright()

    def close_playwright(self):
        """Safely release Playwright resources (context, browser, driver).

        Each resource is closed independently so that a failure closing one
        (e.g. an already-dead browser) does not leak the others. Errors are
        logged, never raised.
        """
        for attr, shutdown in (
            ("context", "close"),
            ("browser", "close"),
            ("playwright", "stop"),
        ):
            resource = getattr(self, attr)
            if resource is None:
                continue
            try:
                getattr(resource, shutdown)()
            except Exception as e:
                logger.error(f"关闭 Playwright 时出错: {e}")
            finally:
                # Drop the reference even if close failed, so a retry of
                # init_playwright() starts from a clean slate.
                setattr(self, attr, None)
        logger.info("Playwright 已关闭")

    def get_abstract(self, doi: str) -> str:
        """Return the first non-empty abstract found for *doi*, else ''.

        Tries each source in turn with per-method exception isolation and a
        short delay between sources, so one flaky backend never aborts the
        whole lookup.
        """
        methods = (
            self.get_abstract_by_semantic_scholar_api,
            self.get_abstract_by_crossref_api,
            self.get_abstract_by_acm,
        )

        for method in methods:
            try:
                abstract = method(doi)
                if abstract:
                    return abstract
                time.sleep(1)  # brief pause between sources to stay polite
            except Exception as e:
                logger.warning(f"{method.__name__} 失败: {e}")
                continue

        return ''

    @staticmethod
    def get_abstract_by_semantic_scholar_api(doi: str) -> Optional[str]:
        """Fetch the abstract from the Semantic Scholar Graph API.

        Returns None on HTTP errors, network failures, or when the paper has
        no abstract field.
        """
        try:
            time.sleep(1)  # crude rate limiting for the public API
            api_url = f"https://api.semanticscholar.org/graph/v1/paper/DOI:{doi}?fields=abstract"
            # BUG FIX: HEADERS was previously passed positionally, which
            # requests.get() binds to the `params` argument — the headers
            # were sent as query-string parameters instead of HTTP headers.
            response = requests.get(api_url, headers=HEADERS, timeout=15)

            if response.status_code == 200:
                # 'abstract' may be present but null; .get() returns None then.
                return response.json().get('abstract', None)

            logger.error(f"Semantic Scholar API {api_url} 错误: {response.status_code}")
            return None

        except Exception as e:
            logger.error(f"获取Semantic Scholar关键词错误: {str(e)}")
            return None

    @staticmethod
    def get_abstract_by_crossref_api(doi: str) -> Optional[str]:
        """Fetch the abstract from the Crossref REST API.

        Returns None on any HTTP or network error. NOTE(review): Crossref
        abstracts often contain JATS XML markup; callers get the raw value.
        """
        try:
            url = f"https://api.crossref.org/works/{doi}"
            response = requests.get(url, timeout=15)
            logger.info(f'get_abstract_by_crossref_api {url}')

            if response.status_code == 200:
                paper_info = response.json().get('message', {})
                return paper_info.get('abstract', None)
            else:
                logger.error(f"Error: {response.status_code}")
                return None
        except Exception as e:
            logger.error(f"Crossref API 请求失败: {e}")
            return None

    def init_playwright(self):
        """Initialize Playwright with retries and exponential backoff.

        Skips re-initialization when a live instance is younger than five
        minutes. Raises the last error after three failed attempts.
        """
        if self.playwright and time.time() - self.last_init_time < 300:  # reuse a recent instance
            return

        max_retries = 3
        for attempt in range(max_retries):
            try:
                self.close_playwright()  # tear down any stale instance first

                self.playwright = sync_playwright().start()
                # Launch a headless browser with a generous startup timeout.
                self.browser = self.playwright.chromium.launch(
                    headless=True,
                    timeout=60000  # 60 s launch timeout
                )
                self.context = self.browser.new_context(
                    user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
                    viewport={'width': 1920, 'height': 1080},
                    java_script_enabled=True,
                    ignore_https_errors=True,
                )
                # Mask the headless browser so bot-detection scripts that
                # probe navigator.webdriver / window.chrome see a normal UA.
                self.context.add_init_script("""
                    Object.defineProperty(navigator, 'webdriver', {
                        get: () => false,
                    });
                    window.chrome = {
                        runtime: {},
                    };
                """)
                self.last_init_time = time.time()
                logger.info("Playwright 初始化成功")
                return
            except Exception as e:
                logger.error(f"Playwright 初始化失败 (尝试 {attempt + 1}/{max_retries}): {e}")
                self.close_playwright()
                if attempt == max_retries - 1:
                    raise
                time.sleep(2 ** attempt)  # exponential backoff: 1s, 2s

    @staticmethod
    def extract_abstract(soup: BeautifulSoup) -> str:
        """Locate and return the abstract text in a parsed publisher page.

        Tries several CSS selectors in order; a match shorter than 50
        characters is treated as noise and the search continues. Returns ''
        when nothing plausible is found.
        """
        try:
            # Selectors cover the abstract markup variants seen on ACM-style
            # publisher pages.
            selectors = [
                'section#abstract',
                'div.abstract',
                'div[class*="abstract"]',
                'div.abstractSection',
                'div[data-testid="abstract"]'
            ]

            for selector in selectors:
                abstract_element = soup.select_one(selector)
                if not abstract_element:
                    continue
                # Prefer the direct child paragraphs; fall back to the
                # container's full text when there are none.
                paragraphs = abstract_element.find_all(['p', 'div'], recursive=False)
                if paragraphs:
                    abstract_text = ' '.join(p.get_text(strip=True) for p in paragraphs)
                else:
                    abstract_text = abstract_element.get_text(strip=True)

                if abstract_text and len(abstract_text) > 50:  # require a plausible length
                    return abstract_text

            logger.warning("未找到摘要内容")
            return ""
        except Exception as e:
            logger.error(f"提取摘要时发生错误: {e}")
            return ""

    def get_abstract_by_acm(self, doi: str) -> Optional[str]:
        """Scrape the abstract from the publisher page behind doi.org.

        Opens the page in the shared Playwright context, parses it with
        BeautifulSoup, and normalizes whitespace. Returns None when the
        browser cannot be started or no abstract is found after retries.
        """
        acm_url = f'https://doi.org/{doi}'

        # Lazily (re)start the browser if a previous attempt tore it down.
        if not self.context:
            try:
                self.init_playwright()
            except Exception as e:
                logger.error(f"无法初始化 Playwright: {e}")
                return None

        max_retries = 2
        for attempt in range(max_retries):
            page = None  # fresh page per attempt; closed in finally
            try:
                logger.info(f"正在抓取ACM页面 (尝试 {attempt + 1}/{max_retries}): {acm_url}")
                page = self.context.new_page()
                # Bound both element waits and navigation at 30 s.
                page.set_default_timeout(30000)
                page.set_default_navigation_timeout(30000)

                # domcontentloaded is more reliable than 'load' on pages with
                # slow third-party assets.
                page.goto(acm_url, wait_until="domcontentloaded")

                # A missing body is tolerated — the content may still parse.
                try:
                    page.wait_for_selector('body', state='attached', timeout=10000)
                except PlaywrightTimeoutError:
                    logger.warning("页面主体加载超时")

                content = page.content()
                soup = BeautifulSoup(content, 'lxml')

                abstract = self.extract_abstract(soup)
                abstract = re.sub(r'\s+', ' ', abstract).strip()

                if abstract:
                    logger.info(f"成功获取摘要: {abstract[:100]}...")
                    return abstract
                else:
                    logger.warning("未找到摘要内容")

            except PlaywrightTimeoutError:
                logger.warning(f"ACM页面加载超时 (尝试 {attempt + 1})")
                if attempt == max_retries - 1:
                    logger.error(f"所有尝试均超时: {acm_url}")
            except Exception as e:
                logger.error(f"抓取ACM页面失败: {e}")
            finally:
                # Always close the per-attempt page to avoid leaking tabs.
                if page:
                    try:
                        page.close()
                    except Exception:
                        pass

        return None
