"""
Papers with Code 数据源
获取最新的AI论文和相关代码实现
"""
import requests
from typing import List, Dict, Any
import json
import os
from datetime import datetime, timedelta

# On-disk cache for Papers with Code API responses: results are written to a
# single JSON file and reused while they are fresh enough.
CACHE_DIR = "./cache"
CACHE_FILE = os.path.join(CACHE_DIR, "pwc_cache.json")
# Cached results older than this many hours are considered stale and refetched.
CACHE_EXPIRATION_HOURS = 1

def fetch_papers_with_code(query: str = "machine learning", max_results: int = 20) -> List[Dict[str, Any]]:
    """
    Fetch recent papers from the Papers with Code API.

    Results are cached on disk for ``CACHE_EXPIRATION_HOURS``; the cache is
    reused only when both ``query`` and ``max_results`` match the cached run.

    Args:
        query: Free-text search term passed to the API's ``q`` parameter.
        max_results: Maximum number of papers to return.

    Returns:
        A list of paper dicts with keys ``title``, ``url``, ``published``,
        ``authors``, ``abstract``, ``github_url``, ``tasks``, ``source``.
        Returns an empty list if the fetch fails.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)

    # Try the on-disk cache first. A corrupt or partially-written cache file
    # (or one missing expected keys) must not crash the caller, so any
    # read/parse error simply falls through to a fresh fetch.
    if os.path.exists(CACHE_FILE):
        try:
            with open(CACHE_FILE, "r", encoding="utf-8") as f:
                cache_data = json.load(f)
            cached_time = datetime.fromisoformat(cache_data["timestamp"])
            if (datetime.now() - cached_time < timedelta(hours=CACHE_EXPIRATION_HOURS) and
                cache_data.get("query") == query and
                cache_data.get("max_results") == max_results):
                print(f"[DEBUG] Returning cached Papers with Code results.")
                return cache_data["papers"]
        except (OSError, ValueError, KeyError, TypeError):
            # Stale/corrupt cache — ignore it and refetch below.
            pass

    try:
        # Papers with Code API
        url = "https://paperswithcode.com/api/v1/papers/"
        params = {
            'q': query,
            'ordering': '-published',  # newest first
        }

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }

        response = requests.get(url, params=params, headers=headers, timeout=30)
        response.raise_for_status()

        data = response.json()
        papers = []

        for item in data.get('results', [])[:max_results]:
            paper = {
                'title': item.get('title', ''),
                'url': item.get('url_abs', ''),
                'published': item.get('published', ''),
                'authors': ', '.join([author.get('name', '') for author in item.get('authors', [])]),
                'abstract': item.get('abstract', ''),
                'github_url': item.get('repository_url', ''),
                'tasks': [task.get('name', '') for task in item.get('tasks', [])],
                'source': 'papers_with_code'
            }
            papers.append(paper)

        # Persist to cache. A cache-write failure (disk full, permissions)
        # must not discard papers we already fetched successfully, so it is
        # handled separately from the fetch itself.
        cache_data = {
            "timestamp": datetime.now().isoformat(),
            "query": query,
            "max_results": max_results,
            "papers": papers
        }
        try:
            with open(CACHE_FILE, "w", encoding="utf-8") as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)
        except OSError:
            pass  # best-effort cache; still return the fetched papers

        return papers

    except Exception as e:
        # Best-effort data source: report and return an empty result rather
        # than propagating network/API errors to the caller.
        print(f"获取Papers with Code数据时出错: {e}")
        return []

def fetch_trending_papers(days: int = 7) -> List[Dict[str, Any]]:
    """
    Fetch the most-starred papers published within the last ``days`` days.

    Args:
        days: Size of the lookback window, in days.

    Returns:
        Up to 10 paper dicts (keys: ``title``, ``url``, ``published``,
        ``authors``, ``abstract``, ``github_url``, ``stars``, ``tasks``,
        ``source``); an empty list if the request fails.
    """
    try:
        # Only consider papers published after this date.
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')

        response = requests.get(
            "https://paperswithcode.com/api/v1/papers/",
            params={
                'ordering': '-stars',  # most-starred first
                'published_after': cutoff,
            },
            headers={
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            },
            timeout=30,
        )
        response.raise_for_status()

        # Keep only the top 10 trending entries.
        top_entries = response.json().get('results', [])[:10]
        return [
            {
                'title': entry.get('title', ''),
                'url': entry.get('url_abs', ''),
                'published': entry.get('published', ''),
                'authors': ', '.join(person.get('name', '') for person in entry.get('authors', [])),
                'abstract': entry.get('abstract', ''),
                'github_url': entry.get('repository_url', ''),
                'stars': entry.get('stars', 0),
                'tasks': [task.get('name', '') for task in entry.get('tasks', [])],
                'source': 'papers_with_code_trending',
            }
            for entry in top_entries
        ]

    except Exception as e:
        # Best-effort: swallow network/API errors and report an empty result.
        print(f"获取热门论文时出错: {e}")
        return []