import time
import os
import requests
import xml.etree.ElementTree as ET
import argparse
from config import (
    ARXIV_API_ENDPOINT,
    HEADERS,
    OUTPUT_JSON_PATH,
    OUTPUT_JSON_IDS_PATH,
    REQUEST_INTERVAL,
    MAX_RESULTS,
    batch_size
)
from datetime import datetime
import json


def fetch_results(search_query, start=0, max_results=batch_size):
    """Fetch one page of arXiv papers from the Atom API.

    Args:
        search_query: Pre-encoded arXiv query string. The caller is
            responsible for URL-encoding; '+', '[' and ']' must already be
            in the exact form the arXiv API expects.
        start: Zero-based offset of the first result in this page.
        max_results: Page size for this request.

    Returns:
        Raw Atom XML bytes on success, or None after a failed request
        (a 30-second back-off is applied before returning so the caller
        may retry).
    """
    # Normalize the endpoint so we append exactly one '?' ourselves.
    base_url = ARXIV_API_ENDPOINT.rstrip('?')
    # Build the URL by hand: requests' automatic parameter encoding would
    # percent-escape the '+' and '[]' characters that the arXiv API reads
    # verbatim in search_query.
    # BUG FIX: use the search_query parameter instead of the module-level
    # SEARCH_QUERY global, which made the parameter dead code.
    query_url = (
        f"{base_url}?search_query={search_query}&start={start}"
        f"&max_results={max_results}&sortBy=submittedDate&sortOrder=descending"
    )
    print(f"请求URL: {query_url}")  # full URL for debugging
    try:
        response = requests.get(
            query_url,
            headers=HEADERS,
            timeout=10
        )
        response.raise_for_status()
        return response.content
    except (requests.HTTPError, requests.ConnectionError) as e:
        print(f"请求失败: {str(e)}")
        time.sleep(30)  # long back-off before the caller retries
        return None


def parse_entry(entry):
    """Extract the fields of interest from one Atom ``<entry>`` element.

    Args:
        entry: An ``xml.etree.ElementTree.Element`` for a single paper.

    Returns:
        dict with keys ``arxiv_id``, ``title``, ``abstract``,
        ``authors`` (list of author names), ``published`` (timestamp
        string as provided by the feed), and ``doi`` (str or None).
    """
    atom = "{http://www.w3.org/2005/Atom}"
    # BUG FIX: ElementTree Elements are falsy when they have no children,
    # so the old `if entry.find(...)` truthiness test silently dropped every
    # DOI; compare against None explicitly. Also, the DOI string is the
    # element's own .text, not the text of a (nonexistent) first child, so
    # the old `[0].text` indexing would have raised IndexError anyway.
    doi_el = entry.find("{http://arxiv.org/schemas/atom}doi")
    return {
        # The Atom <id> is a URL like http://arxiv.org/abs/XXXX.YYYYYvN;
        # keep only the trailing id segment.
        "arxiv_id": entry.find(f"{atom}id").text.split('/')[-1],
        "title": entry.find(f"{atom}title").text.strip(),
        "abstract": entry.find(f"{atom}summary").text.strip(),
        "authors": [author.find(f"{atom}name").text
                    for author in entry.findall(f"{atom}author")],
        "published": entry.find(f"{atom}published").text,
        "doi": doi_el.text if doi_el is not None else None,
    }


def crawl_arxiv(output_path=OUTPUT_JSON_PATH, ids_path=OUTPUT_JSON_IDS_PATH,
                max_failures=3):
    """Page through arXiv results for the global SEARCH_QUERY and save them.

    Reads the module-level SEARCH_QUERY (set by the CLI entry point) and
    fetches up to MAX_RESULTS papers in pages of batch_size.

    Args:
        output_path: Destination JSON file for the full paper records.
        ids_path: Destination JSON file for the bare list of arXiv ids.
        max_failures: Consecutive failed page fetches tolerated before
            giving up (the old code looped forever on a persistent error).

    Side effects: performs HTTP requests, writes two JSON files, prints
    progress messages.
    """
    start = 0
    consecutive_failures = 0
    total_ids = []
    total_papers = []
    while start < MAX_RESULTS:
        print(f"正在获取 {start}-{start+batch_size} 条论文...")
        xml_data = fetch_results(SEARCH_QUERY, start, batch_size)

        if not xml_data:
            # fetch_results already slept 30s; stop after repeated failures
            # instead of retrying the same page forever.
            consecutive_failures += 1
            if consecutive_failures >= max_failures:
                break
            continue
        consecutive_failures = 0

        root = ET.fromstring(xml_data)
        entries = root.findall("{http://www.w3.org/2005/Atom}entry")

        for entry in entries:
            paper_data = parse_entry(entry)
            total_ids.append(paper_data['arxiv_id'])
            total_papers.append(paper_data)

        start += len(entries)

        # A short page means the feed is exhausted.
        if len(entries) < batch_size:
            break

        time.sleep(REQUEST_INTERVAL.total_seconds())
    print(f"共获取 {len(total_ids)} 条论文数据。")

    # Use context managers so the file handles are closed even on error
    # (the old json.dump(..., open(...)) pattern leaked them).
    with open(ids_path, 'w', encoding='utf-8') as f:
        json.dump(total_ids, f, indent=2, default=str, ensure_ascii=False)

    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(total_papers, f, indent=2, default=str, ensure_ascii=False)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='arXiv论文爬虫')
    parser.add_argument("-c", "--categories", nargs='+',
                        help="论文类别列表（例如 cs.CV cs.LG）")
    parser.add_argument("-s", "--start_date", required=True,
                        help="起始日期（格式：YYYYMMDDTTTT），例如202505010100")
    parser.add_argument("-e", "--end_date", required=True,
                        help="结束日期（格式：YYYYMMDDTTTT），例如202505080330")
    parser.add_argument(
        "-o", "--output", default=OUTPUT_JSON_PATH, help="输出文件路径")
    args = parser.parse_args()

    try:
        # start_date/end_date are required=True, so argparse guarantees
        # they are present here — no fallback needed.
        # Build the raw query keeping '[' ']' and '+' literal: the arXiv
        # API expects e.g. submittedDate:[202505010100+TO+202505080330].
        time_range = f'submittedDate:[{args.start_date}+TO+{args.end_date}]'
        if args.categories:
            cat_query = "+OR+".join(f"cat:{cat}" for cat in args.categories)
            raw_query = f"({cat_query})+AND+{time_range}"
        else:
            raw_query = time_range

        from urllib.parse import quote
        # Percent-encode everything except the separators the API must
        # receive verbatim. (No `global` statement needed: assignment at
        # module level already creates/updates the global binding.)
        SEARCH_QUERY = quote(raw_query, safe=':+[]')

        print(f"搜索条件: {SEARCH_QUERY}")
        # NOTE(review): args.output is parsed but never used — crawl_arxiv()
        # still writes to OUTPUT_JSON_PATH from config. Wire -o through when
        # the crawl function accepts an output path.
        crawl_arxiv()
        print(f"爬取完成！结果已保存至 {OUTPUT_JSON_PATH}")
    except KeyboardInterrupt:
        print("\n用户中断，正在保存已获取的数据...")