import re
from typing import List
from urllib.parse import urlparse, urljoin

from models import Paper, Conference
from paper_api import PaperApi
from utils import logger, fetch_page_content, extract_keywords_from_abstract


class DBLPCrawler:
    """Crawler for conference papers listed on DBLP.

    Fetches a DBLP conference page, extracts one ``Paper`` per
    ``<li class="entry inproceedings">`` element, and (where a DOI link is
    present) enriches each paper with an abstract and derived keywords.
    """

    def __init__(self):
        # API client used to resolve a DOI into an abstract.
        self.paper = PaperApi()
        # Base URL for resolving relative hrefs found on DBLP pages.
        self.base_url = "https://dblp.org"

    def crawl_conference(self, conference: Conference) -> List[Paper]:
        """Crawl all papers of the given conference edition.

        Returns the list of extracted ``Paper`` objects, or an empty list
        when the page cannot be fetched, does not mention the requested
        year, or contains no paper entries. Each extracted paper is also
        registered on ``conference`` via ``add_paper``.
        """
        soup = fetch_page_content(conference.url)
        # The year check guards against DBLP serving a page for a
        # different edition than the one requested.
        if not soup or str(conference.year) not in str(soup):
            # NOTE: lazy %-style args — the message is only formatted if
            # this log level is actually emitted.
            logger.warning("未能获取 %s %s 的页面。", conference.name, conference.year)
            return []

        # DBLP marks each paper with <li class="entry inproceedings">.
        paper_entries = soup.find_all('li', class_='entry inproceedings')
        if not paper_entries:
            logger.warning("未找到论文条目。")
            return []

        logger.info("找到 %s 篇论文。", len(paper_entries))
        return self._process_paper_entries(paper_entries, conference)

    def _process_paper_entries(self, paper_entries, conference: Conference) -> List[Paper]:
        """Convert raw DBLP entry tags into ``Paper`` objects.

        A failure on one entry is logged (with traceback) and skipped so a
        single malformed entry cannot abort the whole crawl.
        """
        papers = []

        for idx, entry in enumerate(paper_entries, 1):
            try:
                paper = self._extract_paper_info_from_dblp_entry(entry, conference.name, conference.year)
                self._enhance_paper_with_abstract(paper, idx, len(paper_entries))
                paper.track = conference.track
                logger.info("✅ 已添加: %s... (%s)", paper.title[:60], paper.track)
                papers.append(paper)
                conference.add_paper(paper)

            except Exception:
                # logger.exception preserves the traceback that the old
                # `logger.error(f"...{e}")` form discarded.
                logger.exception("处理论文条目时出错")
                continue

        return papers

    def _enhance_paper_with_abstract(self, paper: Paper, idx: int, total: int):
        """Fill in ``paper.doi``, ``paper.abstract`` and ``paper.keywords``.

        Only papers whose link points at doi.org with a ``/10.*`` path are
        enriched; everything else is logged at debug level and left as-is.
        ``idx``/``total`` are used purely for progress logging.
        """
        if paper.paper_link and 'doi.org' in paper.paper_link:
            parsed_doi = urlparse(paper.paper_link).path
            if parsed_doi.startswith('/10.'):
                # Drop the leading '/' so the stored DOI is "10.xxxx/...".
                paper.doi = parsed_doi[1:]
                logger.info("[%s/%s] 抓取详细信息: %s...", idx, total, paper.title[:50])
                abstract = self.paper.get_abstract(paper.doi)

                if abstract:
                    paper.abstract = abstract
                    paper.keywords = extract_keywords_from_abstract(abstract)
            else:
                logger.debug("DOI链接格式不符: %s", paper.paper_link)
        else:
            logger.debug("未找到有效的DOI链接，无法抓取详细信息。")

    def _extract_paper_info_from_dblp_entry(self, entry, conference: str, year: int) -> Paper:
        """Build a ``Paper`` from one DBLP ``<li class="entry">`` tag.

        Extracts title, author names, and a paper link (DOI preferred,
        'pdf' anchor as fallback). Fields that cannot be found are left at
        their ``Paper`` defaults.
        """
        paper = Paper(conference=conference, year=year, title='')

        # Title: DBLP uses <span class="title">; fall back to the
        # schema.org itemprop, then to the first non-record anchor.
        title_elem = entry.find('span', class_='title') or entry.find('span', itemprop='name')
        if title_elem:
            paper.title = title_elem.get_text(strip=True).rstrip('.')
        else:
            first_a = entry.find('a', href=True)
            # DBLP record-navigation links contain 'rec' — skip those.
            if first_a and 'rec' not in first_a['href']:
                paper.title = first_a.get_text(strip=True)

        # Authors: schema.org markup; assumes Paper.authors is a list
        # initialized by the model — TODO confirm in models.Paper.
        author_spans = entry.find_all('span', itemprop='author')
        for span in author_spans:
            name_elem = span.find('span', itemprop='name') or span
            name = name_elem.get_text(strip=True)
            if name:
                paper.authors.append(name)

        # Paper link: prefer a DOI anchor; otherwise a 'pdf' anchor.
        doi_link = entry.find('a', href=re.compile(r'doi\.org/10\.\d+'))
        if doi_link:
            doi_url = doi_link['href']
            # Resolve relative hrefs against the DBLP base URL.
            paper.paper_link = doi_url if doi_url.startswith('http') else urljoin(self.base_url, doi_url)
        else:
            pdf_link = entry.find('a', string='pdf')
            if pdf_link and pdf_link.get('href'):
                paper.paper_link = urljoin(self.base_url, pdf_link['href'])
        return paper
