import feedparser
import requests
import json
import os
from datetime import datetime, timedelta
from typing import List, Dict
from urllib.parse import quote
from bs4 import BeautifulSoup
import re

CACHE_DIR = "./cache"
CACHE_FILE = os.path.join(CACHE_DIR, "arxiv_cache.json")
CACHE_EXPIRATION_HOURS = 1

def extract_arxiv_images(arxiv_id: str) -> List[Dict]:
    """
    Extract paper-related images from an arXiv abstract page.

    Scrapes ``https://arxiv.org/abs/<arxiv_id>`` for ``<img>`` tags whose
    ``src`` looks paper-related (preview/figure/diagram/chart). When no such
    image is found — including when the page fetch or parse fails — a single
    fallback entry pointing at the paper's PDF is returned instead.

    Args:
        arxiv_id: arXiv identifier, e.g. ``"2301.12345"``.

    Returns:
        At most 2 dicts with keys ``'url'``, ``'alt'`` and ``'type'``
        (``'preview'`` or ``'pdf_link'``).
    """
    # NOTE: the original annotated List[str], but dicts are appended below;
    # the annotation is corrected to List[Dict].
    images = []
    try:
        # Abstract page for the paper
        paper_url = f"https://arxiv.org/abs/{arxiv_id}"

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }

        response = requests.get(paper_url, headers=headers, timeout=15)
        response.raise_for_status()

        soup = BeautifulSoup(response.content, 'html.parser')

        # arXiv pages sometimes embed a first-page preview or figure images.
        for img in soup.find_all('img'):
            src = img.get('src', '')
            alt = img.get('alt', '')

            # Only keep images whose URL suggests paper content.
            if any(keyword in src.lower() for keyword in ['preview', 'figure', 'diagram', 'chart']):
                # Normalize protocol-relative and root-relative URLs;
                # skip anything that is not resolvable to an absolute URL.
                if src.startswith('//'):
                    src = 'https:' + src
                elif src.startswith('/'):
                    src = 'https://arxiv.org' + src
                elif not src.startswith('http'):
                    continue

                images.append({
                    'url': src,
                    'alt': alt or 'arXiv论文图片',
                    'type': 'preview'
                })

    except Exception as e:
        # Best-effort scraping: log and fall through to the PDF fallback.
        print(f"获取arXiv图片时出错 {arxiv_id}: {e}")

    # Fallback kept OUTSIDE the try so a fetch/parse failure still yields
    # the PDF link instead of an empty list (the original returned [] there).
    if not images:
        pdf_thumb_url = f"https://arxiv.org/pdf/{arxiv_id}.pdf"
        images.append({
            'url': pdf_thumb_url,
            'alt': f'arXiv论文 {arxiv_id} PDF',
            'type': 'pdf_link'
        })

    return images[:2]  # Return at most 2 images

def fetch_arxiv_papers(search_query: str = "cat:cs.AI", max_results: int = 10) -> List[Dict]:
    """
    Fetch papers from the arXiv export API, with a 1-hour file cache.

    Results are cached in ``CACHE_FILE``; a cache hit requires the same
    ``search_query`` and ``max_results`` and a timestamp younger than
    ``CACHE_EXPIRATION_HOURS``.

    Args:
        search_query: arXiv API search expression (e.g. ``"cat:cs.AI"``).
        max_results: Maximum number of entries to request.

    Returns:
        List of paper dicts with keys ``title``, ``content``, ``published``,
        ``url``, ``authors``, ``arxiv_id`` and ``images``. Empty list when
        the API request fails.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)

    # Check for cached results. A corrupt or schema-mismatched cache file
    # must not crash the fetch — fall through to a fresh API request instead
    # (the original let json/KeyError propagate here).
    if os.path.exists(CACHE_FILE):
        try:
            with open(CACHE_FILE, "r", encoding="utf-8") as f:
                cache_data = json.load(f)

            cached_time = datetime.fromisoformat(cache_data["timestamp"])
            if datetime.now() - cached_time < timedelta(hours=CACHE_EXPIRATION_HOURS) and \
               cache_data["search_query"] == search_query and \
               cache_data["max_results"] == max_results:
                print("[DEBUG] Returning cached arXiv results.")
                return cache_data["papers"]
        except (json.JSONDecodeError, KeyError, ValueError, OSError) as e:
            print(f"[WARN] Ignoring unreadable cache file: {e}")

    base_url = "http://export.arxiv.org/api/query?"
    encoded_search_query = quote(search_query)
    query = f"search_query={encoded_search_query}&sortBy=relevance&sortOrder=descending&max_results={max_results}"
    full_url = base_url + query
    print(f"[DEBUG] Fetching from URL: {full_url}")

    try:
        # Timeout added for consistency with extract_arxiv_images (the
        # original could hang forever on an unresponsive server).
        response_raw = requests.get(full_url, timeout=15)
        response_raw.raise_for_status() # Raise an HTTPError for bad responses (4xx or 5xx)
        print(f"[DEBUG] Raw API Response (first 500 chars):\n{response_raw.text[:500]}")
        response = feedparser.parse(response_raw.text)
    except requests.exceptions.RequestException as e:
        print(f"[ERROR] Request to arXiv API failed: {e}")
        return []

    print(f"[DEBUG] API returned {len(response.entries)} entries after parsing.")

    papers = []
    for entry in response.entries:
        # Guard against entries without an authors attribute (feedparser
        # does not guarantee its presence).
        authors = [author.name for author in getattr(entry, 'authors', [])]

        # Extract the arXiv ID from the entry URL,
        # e.g. http://arxiv.org/abs/2301.12345v1 -> "2301.12345"
        arxiv_id = ""
        if hasattr(entry, 'id') and entry.id:
            match = re.search(r'arxiv\.org/abs/(\d+\.\d+)', entry.id)
            if match:
                arxiv_id = match.group(1)

        # Fetch preview images for the paper (best-effort, may be slow).
        images = []
        if arxiv_id:
            images = extract_arxiv_images(arxiv_id)

        paper_data = {
            "title": entry.title,
            "content": entry.summary,
            "published": entry.published,
            "url": entry.link,
            "authors": ", ".join(authors),
            "arxiv_id": arxiv_id,
            "images": images
        }

        papers.append(paper_data)

    # Save results to cache
    cache_data = {
        "timestamp": datetime.now().isoformat(),
        "search_query": search_query,
        "max_results": max_results,
        "papers": papers
    }
    with open(CACHE_FILE, "w", encoding="utf-8") as f:
        json.dump(cache_data, f, ensure_ascii=False, indent=4)

    return papers

if __name__ == '__main__':
    # Demo: fetch default AI papers and print title + summary for each.
    for entry in fetch_arxiv_papers():
        print(f"Title: {entry['title']}\nSummary: {entry['content']}\n---")