import html
import re
from typing import List, Optional
from urllib.parse import urlparse

import requests
from bs4 import BeautifulSoup

from config import HEADERS
from models import Paper, Conference
from utils import logger, extract_keywords_from_abstract


class ResearchrCrawler:
    """Crawler for accepted-paper listings on conf.researchr.org sites."""

    def __init__(self):
        # Default host; overwritten per-conference in crawl_conference() so the
        # AJAX modal endpoint targets the same host as the listing page.
        self.base_url = "https://conf.researchr.org"

    def crawl_conference(self, conference: Conference) -> List[Paper]:
        """Crawl the accepted papers of the given conference.

        Args:
            conference: Conference whose ``url`` points at an
                "Accepted Papers" listing page.

        Returns:
            The list of extracted Paper objects. Each paper is also
            registered on the conference via ``conference.add_paper()``.
            Returns whatever was collected so far on error (best-effort).
        """
        papers: List[Paper] = []

        try:
            # Re-point the AJAX endpoint at the listing page's host.
            parts = urlparse(conference.url)
            self.base_url = f'{parts.scheme}://{parts.netloc}'
            response = requests.get(conference.url, headers=HEADERS, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')
            title = soup.find('h3', string='Accepted Papers')
            if not title:
                logger.warning(f"未找到 {conference.name} {conference.year} 的Accepted Papers部分")
                return papers

            table = title.find_next('table', class_='table table-condensed')
            if not table:
                logger.warning(f"未找到 {conference.name} {conference.year} 的论文表格")
                return papers

            # Some pages omit <tbody>; fall back to scanning the table itself.
            tbody = table.find('tbody') or table

            rows = tbody.find_all('tr')
            logger.info(f"找到 {len(rows)} 篇论文")

            for row in rows:
                paper = self.extract_paper_details(conference.name, conference.year, row)
                if paper:
                    paper.track = conference.track
                    papers.append(paper)
                    conference.add_paper(paper)
                    logger.info(f"✅ 已添加: {paper.title[:60]}... ({paper.track})")

        except Exception as e:
            # Best-effort crawl: log and return what was collected so far.
            logger.error(f"爬取 {conference.name} {conference.year} 论文失败: {e}")

        return papers

    def get_abstract_and_affiliations(self, conf: str, year: int, data_model_id: str) -> tuple:
        """Fetch a paper's abstract and per-author affiliations.

        Posts to the site's event-modal AJAX endpoint and parses the
        HTML fragment it returns.

        Args:
            conf: Conference short name (e.g. ``"icse"``).
            year: Conference edition year.
            data_model_id: Value of the row's ``data-event-modal`` attribute.

        Returns:
            ``(abstract, affiliations)`` where ``affiliations`` maps author
            name -> affiliation string. Both are empty on failure.
        """
        url = f"{self.base_url}/eventDetailsModalByAjaxConferenceEdition"

        # Opaque field names reverse-engineered from the site's AJAX form;
        # do not rename them.
        form_data = {
            'form_131600131703c411e65b13378d08eb1f6672b5a0259': '1',
            'context': [f'{conf}-{year}', f'{conf}-{year}'],
            'ae03f7f6f951d515a297b161e922205d': data_model_id,
            'eventDetailsModalByAjaxConferenceEdition_ia0_3c411e65b13378d08eb1f6672b5a0259': '1',
            '__ajax_runtime_request__': 'event-modal-loader'
        }

        abstract = ""
        affiliations = {}

        try:
            # FIX: timeout added — a stalled server previously hung the crawl
            # forever (the GET in crawl_conference already used timeout=10).
            response = requests.post(url, files={}, data=form_data,
                                     headers=HEADERS, timeout=10)
            response.raise_for_status()

            # The endpoint returns JSON wrapping an HTML-escaped fragment.
            fragment = html.unescape(response.json()[0]['value'])
            soup = BeautifulSoup(fragment, 'html.parser')

            abstract_element = soup.find('div', {'class': 'bg-info event-description'})
            # FIX: guard against a missing description div — previously the
            # author lookup below raised AttributeError on None, so the
            # affiliations were lost whenever the abstract was absent.
            if abstract_element:
                abstract = re.sub(r'\s+', ' ', abstract_element.get_text()).strip()

                # NOTE(review): authors are searched inside the description
                # div, mirroring the original scope — confirm the media-body
                # entries are not siblings of it in the modal markup.
                for div in abstract_element.find_all('div', {'class': 'media-body'}):
                    author_tag = div.find('h5', {'class': 'media-heading'})
                    affiliation_tag = div.find('span', {'class': 'text-black'})
                    # FIX: skip malformed author entries instead of letting a
                    # single missing tag abort the whole extraction.
                    if author_tag and affiliation_tag:
                        affiliations[author_tag.getText().strip()] = affiliation_tag.getText()

        except Exception as e:
            logger.error(f"获取摘要和单位失败: {e}")

        return abstract, affiliations

    def extract_paper_details(self, conf: str, year: int, row) -> Optional[Paper]:
        """Build a Paper from one ``<tr>`` of the accepted-papers table.

        Args:
            conf: Conference short name.
            year: Conference edition year.
            row: BeautifulSoup ``<tr>`` element for one paper.

        Returns:
            A populated Paper, or None for rows that do not describe a
            paper or when extraction fails.
        """
        try:
            cells = row.find_all('td')
            if len(cells) < 2:
                return None

            # The title is the in-page modal link (href="#").
            title_link = cells[1].find('a', href='#')
            if not title_link:
                return None

            title = title_link.get_text().strip()
            paper = Paper(conference=conf, year=year, title=title)

            # Authors are listed in the "performers" div, one <a> each.
            paper.authors = []
            performers_div = cells[1].find('div', class_='performers')
            if performers_div:
                paper.authors = [a.get_text().strip()
                                 for a in performers_div.find_all('a')]

            # FIX: a missing data-event-modal attribute no longer discards
            # the entire paper (the old subscript raised KeyError and the
            # except below returned None); we just skip the abstract lookup.
            modal_id = title_link.get('data-event-modal')
            if modal_id:
                paper.abstract, affiliations = self.get_abstract_and_affiliations(
                    conf, year, modal_id
                )
            else:
                paper.abstract, affiliations = "", {}

            # Align affiliations positionally with the author list; '' when
            # the author's affiliation was not found in the modal.
            paper.affiliations = [affiliations.get(author, '')
                                  for author in paper.authors]
            paper.keywords = extract_keywords_from_abstract(paper.abstract)

            # Prefer the DOI link; normalize doi.org URLs to dl.acm.org.
            for link in cells[1].find_all('a', class_="publication-link navigate"):
                if link.get_text().strip() == 'DOI':
                    doi_url = link['href'].strip()
                    if doi_url.startswith('https://doi.org/'):
                        paper.doi = doi_url.replace('https://doi.org/', '')
                        paper.paper_link = f'https://dl.acm.org/doi/{paper.doi}'
                    else:
                        paper.paper_link = doi_url
                    break

            return paper

        except Exception as e:
            logger.error(f"提取论文详情错误: {e}")
            return None
