#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ArXiv 论文自动更新脚本

自动从arXiv获取最新的大语言模型相关论文，并更新到README.md中
"""

import os
import re
import time
import json
from datetime import datetime, timedelta, timezone
from typing import List, Dict, Optional

import feedparser
import requests
import yaml
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Shared HTTP session for all arXiv requests: transient failures
# (429 rate limiting and 5xx server errors) are retried up to 5 times
# with exponential backoff before the call fails.
session = requests.Session()
retries = Retry(
    total=5,
    backoff_factor=1,
    status_forcelist=[429, 500, 502, 503, 504],
    allowed_methods=["GET", "POST"],
)
for _scheme in ("http://", "https://"):
    session.mount(_scheme, HTTPAdapter(max_retries=retries))

# Script configuration.
CONFIG = {
    'categories': [
        'cs.CL',  # Computation and Language
        'cs.AI',  # Artificial Intelligence
        'cs.LG',  # Machine Learning
    ],
    # Broad topical keywords; NOTE(review): fetch_recent_papers currently
    # queries its own smaller keyword list, not this one — confirm intent.
    'keywords': [
        'large language model',
        'llm',
        'transformer',
        'attention mechanism',
        'prompt engineering',
        'in-context learning',
        'chain-of-thought',
        'instruction tuning',
        'alignment',
        'scaling laws',
        'efficient attention',
    ],
    'max_results': 30,  # maximum number of papers to fetch
    'days_back': 7,     # how many days back to search for papers
    'output_file': '../README.md',  # output path (README.md in the repo root)
}

# Paper classification buckets: each README section label (Chinese heading)
# maps to the keywords that route a paper into it. Matching is case-insensitive
# against title + abstract, first hit in dict order wins (see categorize_paper),
# so earlier categories take precedence when keywords overlap
# (e.g. 'reasoning' appears under both '推理' and '应用').
PAPER_CATEGORIES = {
    '架构': [
        'transformer', 'attention', 'mixture of experts', 'sparse attention', 
        'efficient attention', 'retrieval', 'memory', 'long context'
    ],
    '训练': [
        'training', 'scaling', 'optimization', 'distributed training', 
        'data efficiency', 'pretraining', 'fine-tuning', 'prompt tuning'
    ],
    '推理': [
        'reasoning', 'chain of thought', 'in-context learning', 'few-shot', 
        'zero-shot', 'prompting', 'decoding', 'sampling'
    ],
    '对齐与安全': [
        'alignment', 'safety', 'constitutional ai', 'reinforcement learning from human feedback',
        'red teaming', 'jailbreak', 'harmful content'
    ],
    '效率': [
        'efficient', 'quantization', 'pruning', 'distillation', 
        'compression', 'small language model', 'edge deployment'
    ],
    '多模态': [
        'multimodal', 'vision-language', 'audio', 'video', 
        'image', 'speech', 'visual', 'cross-modal'
    ],
    '应用': [
        'application', 'tool use', 'agent', 'reasoning', 'planning',
        'code generation', 'mathematics', 'science'
    ]
}

def fetch_recent_papers() -> List[Dict]:
    """Fetch recent LLM-related papers from the arXiv API.

    Combines the configured arXiv categories with a core set of keywords,
    restricted to papers submitted within the last ``CONFIG['days_back']``
    days, sorted by submission date (newest first), and returns up to
    ``CONFIG['max_results']`` entries.

    Returns:
        A list of paper dicts (title, authors, published/updated datetimes,
        summary, pdf_url, entry_id, categories, comment, doi, journal_ref).
        Returns an empty list on any failure.
    """
    try:
        # Category filter, e.g. "cat:cs.CL OR cat:cs.AI OR cat:cs.LG".
        categories = ' OR '.join(f'cat:{cat}' for cat in CONFIG['categories'])
        # A compact core keyword set keeps the query URL short;
        # CONFIG['keywords'] is deliberately broader.
        keywords = ['llm', 'transformer', 'language model', 'gpt', 'llama']
        keyword_query = ' OR '.join(f'all:{kw}' for kw in keywords)

        # arXiv's submittedDate filter expects GMT timestamps in
        # YYYYMMDDHHMM format. Honor the configured look-back window
        # (previously hard-coded to 30 days, contradicting days_back=7
        # and the "last week" note written into the README).
        current_date = datetime.now(timezone.utc)
        window_start = (current_date - timedelta(days=CONFIG['days_back'])).strftime('%Y%m%d%H%M')
        window_end = current_date.strftime('%Y%m%d%H%M')

        print(f"Current UTC date: {window_end}")
        print(f"Searching for papers from {window_start} to {window_end}")

        # Full query with date filtering enabled.
        search_query = f'({categories}) AND ({keyword_query}) AND submittedDate:[{window_start} TO {window_end}]'

        # Debug output.
        print(f"Searching for recent papers with keywords: {keywords}")
        print(f"Search query: {search_query}")

        # Build the API request. Honor CONFIG['max_results'] instead of
        # silently capping at 20 as the previous version did.
        base_url = 'http://export.arxiv.org/api/query'
        params = {
            'search_query': search_query,
            'start': 0,
            'max_results': CONFIG['max_results'],
            'sortBy': 'submittedDate',
            'sortOrder': 'descending'
        }

        # Print the full request URL for debugging.
        request_url = f"{base_url}?" + "&".join(f"{k}={v}" for k, v in params.items())
        print(f"Request URL: {request_url}")

        print("Sending request to arXiv API...")
        try:
            response = session.get(base_url, params=params, timeout=30,
                               headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'})
            print(f"Response status: {response.status_code}")
            print(f"Response content (first 500 chars):\n{response.text[:500]}")
        except Exception as e:
            print(f"Request failed: {str(e)}")
            raise
        response.raise_for_status()

        # Parse the Atom feed returned by the API.
        feed = feedparser.parse(response.content)
        papers = [_parse_entry(entry) for entry in feed.entries]

        print(f"Found {len(papers)} papers")
        return papers

    except Exception as e:
        print(f"Error fetching papers: {e}")
        if hasattr(e, 'response') and hasattr(e.response, 'text'):
            print(f"Response: {e.response.text[:500]}")
        return []


def _parse_entry(entry) -> Dict:
    """Convert a single feedparser Atom entry into this script's paper dict."""
    # Author names, when present.
    authors = [author.get('name', '') for author in entry.authors] if 'authors' in entry else []

    # Prefer the explicit PDF link; otherwise rewrite the abstract URL
    # ("/abs/" -> "/pdf/"). Always upgrade to https.
    pdf_url = None
    for link in entry.get('links', []):
        if link.get('type') == 'application/pdf':
            pdf_url = link.get('href', '').replace('http://', 'https://')
            break
    if not pdf_url and 'id' in entry:
        pdf_url = entry.id.replace('abs', 'pdf') + '.pdf'
        pdf_url = pdf_url.replace('http://', 'https://')

    def _parse_ts(value: str) -> datetime:
        # arXiv timestamps look like "2024-01-31T12:34:56Z".
        return datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')

    return {
        'title': entry.get('title', 'No title'),
        'authors': authors,
        'published': _parse_ts(entry.published) if 'published' in entry else None,
        'updated': _parse_ts(entry.updated) if 'updated' in entry else None,
        'summary': entry.get('summary', ''),
        'pdf_url': pdf_url,
        'entry_id': entry.get('id', ''),
        'primary_category': entry.get('arxiv_primary_category', {}).get('term', '') if hasattr(entry, 'arxiv_primary_category') else '',
        'categories': [tag.term for tag in entry.get('tags', [])],
        'comment': entry.get('arxiv_comment', ''),
        'doi': entry.get('arxiv_doi', '') if hasattr(entry, 'arxiv_doi') else '',
        'journal_ref': entry.get('arxiv_journal_ref', '') if hasattr(entry, 'arxiv_journal_ref') else ''
    }

def categorize_paper(paper: Dict) -> str:
    """Assign a paper to the first PAPER_CATEGORIES bucket whose keyword
    list matches its title or abstract (case-insensitive); papers matching
    no bucket fall back to '其他'."""
    text = f"{paper['title']} {paper['summary']}".lower()

    for label, terms in PAPER_CATEGORIES.items():
        for term in terms:
            if term.lower() in text:
                return label

    return '其他'

def _render_paper(paper: Dict) -> str:
    """Render one paper dict as a Markdown list item (with trailing blank line)."""
    # At most three authors are shown, with a "等" ("et al.") suffix beyond that.
    authors = ", ".join(paper['authors'][:3])
    if len(paper['authors']) > 3:
        authors += " 等"

    # Publication date, or a placeholder when the feed entry had none.
    pub_date = paper['published'].strftime('%Y年%m月%d日') if paper['published'] else '日期未知'

    md = f"- **{paper['title']}**  \n"
    md += f"  *{authors}*  \n"
    md += f"  📅 {pub_date} | "
    md += f"[论文]({paper['pdf_url']}) | "

    if paper.get('doi'):
        md += f"[DOI](https://doi.org/{paper['doi']}) | "

    md += f"[摘要]({paper['entry_id']})\n"

    # Venue line: a formal journal reference wins over the free-form
    # arXiv comment when both are present.
    if paper.get('journal_ref'):
        md += f"  *{paper['journal_ref']}*\n"
    elif paper.get('comment'):
        md += f"  *{paper['comment']}*\n"

    return md + "\n"


def update_readme(papers: List[Dict]) -> None:
    """Regenerate the auto-updated paper section of README.md.

    Groups *papers* by categorize_paper(), renders a dated Markdown section,
    strips any previously generated "## 📰 最新论文" section from the existing
    README, and writes the merged result back to CONFIG['output_file']
    (resolved relative to this script's directory).
    """
    # Group papers by category; '其他' collects unmatched papers.
    categorized = {category: [] for category in PAPER_CATEGORIES.keys()}
    categorized['其他'] = []
    for paper in papers:
        categorized[categorize_paper(paper)].append(paper)

    # Build the generated Markdown section.
    today = datetime.now().strftime('%Y年%m月%d日')
    markdown = f"\n\n## 📰 最新论文 (更新于: {today})\n\n"
    markdown += "*注意：本部分内容由脚本自动生成，包含最近一周内arXiv上发布的大语言模型相关论文。*\n\n"

    for category, papers_in_category in categorized.items():
        if not papers_in_category:
            continue  # skip empty sections entirely

        markdown += f"### {category}\n\n"
        for paper in papers_in_category:
            markdown += _render_paper(paper)

    # Read the current README (if any) and drop the previously generated
    # section so repeated runs don't stack duplicates.
    readme_path = os.path.abspath(os.path.join(os.path.dirname(__file__), CONFIG['output_file']))
    existing_content = ""
    if os.path.exists(readme_path):
        with open(readme_path, 'r', encoding='utf-8') as f:
            existing_content = f.read()
        if "## 📰 最新论文" in existing_content:
            existing_content = existing_content.split("## 📰 最新论文")[0].strip()

    # Write the preserved hand-written content followed by the fresh section.
    with open(readme_path, 'w', encoding='utf-8') as f:
        f.write(existing_content + markdown)

    print(f"已更新论文列表: {CONFIG['output_file']}")

def main():
    """Script entry point: fetch recent papers and refresh the README."""
    print("开始获取最新论文...")
    papers = fetch_recent_papers()

    if papers:
        print(f"找到 {len(papers)} 篇新论文。正在更新README...")
        update_readme(papers)
        print("更新完成！")
    else:
        print("未找到新论文。")


if __name__ == "__main__":
    main()
