#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：academic_trend_analysis
@File    ：academic_crawler.py
@IDE     ：PyCharm
@Author  ：iyoahs
@Date    ：2025/6/20 11:35
@Describe：arXiv crawler; saves fetched papers to MongoDB
'''
import feedparser
import os
from datetime import datetime, timedelta
from urllib.parse import quote_plus
import requests
from typing import List, Dict, Optional, Tuple
from src.utils.logger import setup_logger
from src.utils.mongodb_utils import MongoDBUtils
from bson.objectid import ObjectId

logger = setup_logger()


class ArxivCrawler:
    """Crawl recently submitted papers from the arXiv API and persist them to MongoDB."""

    # arXiv's Atom query endpoint.
    BASE_URL = 'http://export.arxiv.org/api/query?'

    def __init__(self, mongo_config: Dict):
        """
        Args:
            mongo_config: connection settings forwarded verbatim to MongoDBUtils.
        """
        self.mongodb_utils = MongoDBUtils(mongo_config)

    def get_time_window(self, hours: int) -> Tuple[datetime, datetime]:
        """Return (start, end) naive-UTC datetimes covering the last `hours` hours.

        Naive UTC is used deliberately so the values compare directly with
        the naive datetimes produced by parse_arxiv_date().
        """
        now = datetime.utcnow()
        return now - timedelta(hours=hours), now

    def parse_arxiv_date(self, date_str: str) -> datetime:
        """Parse an arXiv timestamp such as '2025-06-20T11:35:00Z' into a naive UTC datetime."""
        return datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%SZ")

    def build_search_query(self, query_terms: List[str], hours: int) -> str:
        """Build a URL-encoded arXiv search expression restricted to the last `hours` hours.

        The arXiv API's `submittedDate` filter expects GMT timestamps in
        YYYYMMDDHHMM form (minute precision). The previous implementation
        emitted a seconds-precision stamp with a stray trailing '00Z'
        (e.g. '2025062011350000Z'), which arXiv does not accept; both range
        endpoints are now explicit, minute-precision timestamps.

        Args:
            query_terms: terms OR-ed together, each quoted as a phrase.
            hours: size of the lookback window.

        Returns:
            A quote_plus-encoded query string ready to embed in the request URL.
        """
        phrase_query = " OR ".join(f'"{term}"' for term in query_terms)
        start, now = self.get_time_window(hours)
        date_filter = (f'submittedDate:[{start.strftime("%Y%m%d%H%M")} '
                       f'TO {now.strftime("%Y%m%d%H%M")}]')
        return quote_plus(f'({phrase_query}) AND {date_filter}')

    def fetch_papers(self, query_terms: List[str], hours: int, max_results: int) -> List[Dict]:
        """Query the arXiv API and return the raw feed entries.

        Results are requested newest-first so that truncation at
        `max_results` keeps the most recent submissions in the window.
        """
        search_query = self.build_search_query(query_terms, hours)
        url = (f'{self.BASE_URL}search_query={search_query}'
               f'&sortBy=submittedDate&sortOrder=descending'
               f'&max_results={max_results}')
        return feedparser.parse(url).entries

    def process_entry(self, entry, start_time: datetime) -> Optional[Dict]:
        """Convert one feed entry into a paper dict.

        Returns None when the entry was published at/before `start_time`
        or cannot be parsed.
        """
        try:
            published = self.parse_arxiv_date(entry.published)
            if published <= start_time:
                return None

            # In the arXiv Atom feed the PDF link carries
            # type='application/pdf' with rel='related'; the 'alternate'
            # link is the HTML abstract page. The old condition
            # (rel == 'alternate' AND type == 'application/pdf') could
            # never match, so pdf_url was always None.
            pdf_url = next((link.href for link in entry.links
                            if getattr(link, 'type', None) == 'application/pdf'),
                           None)

            return {
                "title": entry.title.strip(),
                "authors": [author.name for author in entry.authors],
                "abstract": entry.summary.strip().replace("\n", " "),
                "url": entry.link,
                "pdf_url": pdf_url,
                "published": published.isoformat(),
                # Bare arXiv identifier, e.g. '2506.01234v1'.
                "id": entry.id.split('/')[-1],
                "updated": datetime.utcnow().isoformat(),
                "source": "arxiv"
            }
        except Exception as e:
            # Best-effort crawl: log and skip a malformed entry rather than
            # aborting the whole batch. Guard title access — a broken entry
            # may lack it entirely.
            title = getattr(entry, 'title', '<no title>')
            logger.error(f"Error processing entry: {title[:50]}... - {e}")
            return None

    def crawl_recent_papers(self, query_terms: List[str], hours: int,
                            max_results: int) -> List[Dict]:
        """Fetch, filter and persist papers published in the last `hours` hours.

        Returns the list of paper dicts that passed the time-window filter
        (also saved to MongoDB when non-empty).
        """
        start_time, _ = self.get_time_window(hours)
        entries = self.fetch_papers(query_terms, hours, max_results)
        papers = [p for p in (self.process_entry(e, start_time) for e in entries) if p]

        if papers:
            self.mongodb_utils.save_papers(papers)

        logger.info(f"Found {len(papers)} papers published in the last {hours} hours.")
        return papers