import re
from bs4 import BeautifulSoup
from typing import Dict, List, Any, Optional
from datetime import datetime
import requests
import time
import random
from .base_crawler import BaseCrawler

class ArxivCrawler(BaseCrawler):
    """
    arXiv crawler that fetches recent AI-related papers.

    It walks the /recent listing page of each configured arXiv category,
    parses either the newer ``li.arxiv-result`` markup or the classic
    dt/dd listing markup, and (for the newer markup) enriches each entry
    with categories / publication date scraped from the abstract page.
    """

    def __init__(self, source_config: Dict[str, Any]):
        """
        Initialize the arXiv crawler.

        Args:
            source_config: Data-source configuration, forwarded to
                BaseCrawler (expected to provide at least the base ``url``
                and source ``name`` used by :meth:`crawl`).
        """
        super().__init__(source_config)
        # arXiv subject classes covering AI-related research.
        self.categories = [
            'cs.AI',   # Artificial Intelligence
            'cs.CL',   # Computation and Language
            'cs.CV',   # Computer Vision
            'cs.LG',   # Machine Learning
            'cs.NE',   # Neural and Evolutionary Computing
            'stat.ML'  # Statistics / Machine Learning
        ]

    def extract_data(self, soup: BeautifulSoup) -> List[Dict[str, Any]]:
        """
        Extract paper records from an arXiv listing page.

        Supports both page layouts arXiv serves: the newer
        ``li.arxiv-result`` items and the classic dt/dd listing.

        Args:
            soup: Parsed listing page.

        Returns:
            List of paper dicts with keys: ``type``, ``id``, ``title``,
            ``authors``, ``abstract``, ``categories``, ``published_date``,
            ``url``, ``pdf_url``.
        """
        articles = soup.find_all('li', class_='arxiv-result')
        if articles:
            return self._parse_result_items(articles)
        return self._parse_dt_dd_listing(soup)

    @staticmethod
    def _labeled_text(container, tag: str, css_class: str,
                      label: str, default: str) -> str:
        """Return a child element's text with its leading label stripped,
        or *default* when the element is missing."""
        element = container.find(tag, class_=css_class)
        if not element:
            return default
        return element.text.replace(label, '').strip()

    def _parse_dt_dd_listing(self, soup: BeautifulSoup) -> List[Dict[str, Any]]:
        """Parse the classic dt/dd arXiv listing layout."""
        results = []
        for dt in soup.find_all('dt'):
            try:
                dd = dt.find_next_sibling('dd')
                if not dd:
                    continue

                paper_id = ''
                arxiv_link = dt.find('a', title='Abstract')
                if arxiv_link and arxiv_link.get('href'):
                    # The abstract URL ends with the arXiv identifier.
                    paper_id = arxiv_link['href'].split('/')[-1]

                title = self._labeled_text(dd, 'div', 'list-title',
                                           'Title:', '无标题')

                authors_text = self._labeled_text(dd, 'div', 'list-authors',
                                                  'Authors:', '未知作者')
                authors = [a.strip() for a in authors_text.split(',')]

                abstract_element = dd.find('p', class_='mathjax')
                abstract = abstract_element.text.strip() if abstract_element else ""

                subjects_text = self._labeled_text(dd, 'div', 'list-subjects',
                                                   'Subjects:', '')
                # Empty subjects line -> no categories (not [''])
                categories = ([c.strip() for c in subjects_text.split(';')]
                              if subjects_text else [])

                date_str = self._labeled_text(dd, 'div', 'list-date',
                                              'Date:', '')
                try:
                    date = datetime.strptime(
                        date_str, '%a, %d %b %Y %H:%M:%S %Z').isoformat()
                except ValueError:
                    # Dateline missing or in an unexpected format:
                    # fall back to crawl time.
                    date = datetime.now().isoformat()

                results.append({
                    'type': 'paper',
                    'id': paper_id,
                    'title': title,
                    'authors': authors,
                    'abstract': abstract,
                    'categories': categories,
                    'published_date': date,
                    'url': f'https://arxiv.org/abs/{paper_id}',
                    'pdf_url': f'https://arxiv.org/pdf/{paper_id}.pdf'
                })
            except Exception as e:
                self.logger.error(f"Error parsing paper entry: {str(e)}")
        return results

    def _parse_result_items(self, articles) -> List[Dict[str, Any]]:
        """Parse ``li.arxiv-result`` items from the newer listing layout."""
        results = []
        for article in articles:
            try:
                title_element = article.find('p', class_='title')
                title = title_element.text.strip() if title_element else "无标题"

                authors_element = article.find('p', class_='authors')
                authors_text = (authors_element.text.strip()
                                if authors_element else "未知作者")
                authors = [a.strip() for a in authors_text.split(',')]

                abstract_element = article.find('p', class_='abstract')
                abstract = abstract_element.text.strip() if abstract_element else ""

                paper_id = ""
                link_element = article.find('a', class_='abstract-link')
                if link_element and link_element.get('href'):
                    match = re.search(r'/abs/([^/]+)$', link_element['href'])
                    if match:
                        paper_id = match.group(1)

                paper_data = {
                    'type': 'paper',
                    'id': paper_id,
                    'title': title,
                    'authors': authors,
                    'abstract': abstract,
                    'categories': [],  # filled in from the detail page below
                    'published_date': datetime.now().isoformat(),  # placeholder
                    'url': f'https://arxiv.org/abs/{paper_id}',
                    'pdf_url': f'https://arxiv.org/pdf/{paper_id}.pdf'
                }

                # Enrich with categories / publication date from the
                # abstract page; failures only cost the extra fields.
                if paper_id:
                    try:
                        details = self._get_paper_details(paper_id)
                        if details:
                            paper_data.update(details)
                    except Exception as e:
                        self.logger.error(
                            f"Error fetching details for {paper_id}: {str(e)}")

                results.append(paper_data)
            except Exception as e:
                self.logger.error(f"Error parsing paper entry: {str(e)}")
        return results

    def _get_paper_details(self, paper_id: str) -> Optional[Dict[str, Any]]:
        """
        Fetch detailed metadata from a paper's abstract page.

        Args:
            paper_id: arXiv identifier (e.g. ``2401.01234``).

        Returns:
            Dict with ``categories``, ``published_date`` and
            ``citation_count``, or ``None`` when the page could not be
            fetched or parsed.
        """
        url = f'https://arxiv.org/abs/{paper_id}'
        try:
            # Random delay to avoid being rate-limited / banned.
            time.sleep(random.uniform(1, 3))
            soup = self.fetch_page(url)
            if not soup:
                return None

            # Categories: primary subject first, then any additional
            # subject links pointing at /list/ pages.
            categories = []
            primary_element = soup.find('span', class_='primary-subject')
            if primary_element:
                categories.append(primary_element.text.strip())

            for category in soup.find_all('a', title=re.compile(r'find.*', re.I)):
                if category.get('href') and '/list/' in category['href']:
                    cat_text = category.text.strip()
                    if cat_text not in categories:
                        categories.append(cat_text)

            # Publication date from the dateline, e.g. "[Submitted on 1 Feb 2024]".
            date_str = ""
            date_element = soup.find('div', class_='dateline')
            if date_element:
                date_match = re.search(r'\[(\w+),\s+(\d+\s+\w+\s+\d+)',
                                       date_element.text)
                if date_match:
                    date_str = date_match.group(2)

            try:
                published_date = datetime.strptime(date_str, '%d %b %Y').isoformat()
            except ValueError:
                # No parseable dateline: fall back to crawl time.
                published_date = datetime.now().isoformat()

            # Citation count, when the page exposes one.
            # NOTE: `string=` replaces the deprecated `text=` find() keyword.
            citation_count = 0
            citation_element = soup.find('td', string=re.compile('Citations:', re.I))
            if citation_element:
                next_td = citation_element.find_next_sibling('td')
                if next_td:
                    count_match = re.search(r'\d+', next_td.text)
                    if count_match:
                        citation_count = int(count_match.group())

            return {
                'categories': categories,
                'published_date': published_date,
                'citation_count': citation_count
            }

        except Exception as e:
            self.logger.error(f"Error fetching details for {paper_id}: {str(e)}")
            return None

    def crawl(self) -> List[Dict[str, Any]]:
        """
        Crawl every configured category's /recent listing.

        Returns:
            Combined list of paper dicts, each annotated with ``source``,
            ``source_url``, ``crawled_at`` and ``category`` metadata.
        """
        all_papers = []

        for category in self.categories:
            self.logger.info(f"Crawling arXiv category: {category}")
            category_url = f"{self.url.rstrip('/')}/{category}/recent"
            soup = self.fetch_page(category_url)
            if not soup:
                continue
            try:
                papers = self.extract_data(soup)
                # Annotate each paper with crawl metadata.
                for paper in papers:
                    paper.update({
                        'source': self.name,
                        'source_url': category_url,
                        'crawled_at': datetime.now().isoformat(),
                        'category': category
                    })
                all_papers.extend(papers)
                self.logger.info(
                    f"Successfully crawled {len(papers)} papers from {category}")
                # Throttle between categories to avoid hammering arXiv.
                time.sleep(random.uniform(2, 5))
            except Exception as e:
                self.logger.error(f"Error extracting data from {category}: {str(e)}")

        return all_papers