import arxiv
import os
import re
from .download_pdf import download_pdf_by_url

def safe_filename(name):
    """Return *name* with characters illegal in filenames replaced by '_'.

    Covers the Windows-reserved set (\\ / : * ? " < > |), which is a
    superset of what POSIX forbids, so the result is portable.
    """
    illegal = '\\/:*?"<>|'
    return name.translate(str.maketrans(illegal, "_" * len(illegal)))

def search_arxiv(
    keywords=None,
    title=None,
    author=None,
    abstract=None,
    include_cat=None,
    exclude_cat=None,
    comment=None,
    max_results=10,
    start_date=None,
    end_date=None,
    download_pdf=False,
    save_dir="./papers"
):
    """
    Search arXiv and return structured results, optionally downloading PDFs.

    Parameters
    ----------
    keywords, title, author, abstract, comment : str, optional
        Phrase matched against the corresponding arXiv query field
        (``all`` / ``ti`` / ``au`` / ``abs`` / ``co``).
    include_cat : str or list of str, optional
        Category (or OR-combined categories) results must belong to.
    exclude_cat : str or list of str, optional
        Category (or categories) excluded from the results.
    max_results : int
        Maximum number of results fetched from arXiv.  Date filtering
        happens client-side *after* fetching, so fewer than
        ``max_results`` papers may be returned.
    start_date, end_date : datetime, optional
        Naive datetimes bounding the publication date (papers strictly
        outside the bounds are skipped).
    download_pdf : bool
        When True, save each matching paper's PDF under *save_dir*.
    save_dir : str
        Directory for downloaded PDFs; created if missing.

    Returns
    -------
    list of dict
        One dict per paper with keys: ``title``, ``authors``,
        ``primary_category``, ``summary``, ``published`` (naive
        datetime), ``pdf_url``.

    Raises
    ------
    ValueError
        If ``exclude_cat`` is given without any positive criterion
        (``ANDNOT`` needs a left-hand operand).
    """
    positive = []
    if keywords:
        positive.append(f'all:"{keywords}"')
    if title:
        positive.append(f'ti:"{title}"')
    if author:
        positive.append(f'au:"{author}"')
    if abstract:
        positive.append(f'abs:"{abstract}"')
    if include_cat:
        if isinstance(include_cat, list):
            cat_query = " OR ".join(f'cat:{c}' for c in include_cat)
            positive.append(f'({cat_query})')
        else:
            positive.append(f'cat:{include_cat}')
    if comment:
        positive.append(f'co:"{comment}"')
    query = " AND ".join(positive)
    # BUG FIX: ANDNOT is a *binary* operator in the arXiv query grammar.
    # The previous code appended standalone 'ANDNOT cat:x' parts and then
    # joined everything with ' AND ', yielding malformed queries such as
    # 'ti:"x" AND ANDNOT cat:y'.  Build the exclusion as one right-hand
    # operand instead: '(positive) ANDNOT (cat:a OR cat:b)'.
    if exclude_cat:
        if not positive:
            raise ValueError(
                "exclude_cat requires at least one positive search criterion"
            )
        cats = exclude_cat if isinstance(exclude_cat, list) else [exclude_cat]
        excluded = " OR ".join(f'cat:{c}' for c in cats)
        query = f'({query}) ANDNOT ({excluded})'
    search = arxiv.Search(
        query=query,
        max_results=max_results,
        sort_by=arxiv.SortCriterion.SubmittedDate,
        sort_order=arxiv.SortOrder.Descending,
    )
    if download_pdf:
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() guard.
        os.makedirs(save_dir, exist_ok=True)
    results = []
    for result in search.results():
        # Drop tzinfo so the date can be compared with naive start/end dates.
        pub_date = result.published.replace(tzinfo=None)
        # Client-side date filtering (the query itself has no date clause).
        if start_date and pub_date < start_date:
            continue
        if end_date and pub_date > end_date:
            continue
        if download_pdf:
            filename = safe_filename(result.title) + '.pdf'
            save_path = os.path.join(save_dir, filename)
            download_pdf_by_url(result.pdf_url, save_path)
        results.append({
            "title": result.title,
            "authors": [a.name for a in result.authors],
            "primary_category": result.primary_category,
            "summary": result.summary,
            "published": pub_date,
            "pdf_url": result.pdf_url,
        })
    return results

def search_paper(title , author):
    """Look up a single paper by title and author.

    NOTE(review): unimplemented placeholder -- currently always returns
    None.  Callers should use search_arxiv() until this is filled in.
    """
    pass
