import requests
import xml.etree.ElementTree as ET
import json
import argparse
import time
from datetime import datetime

# Parse command-line arguments.
def parse_arguments():
    """Parse CLI options and return the argparse namespace.

    Returns:
        argparse.Namespace with ``month`` (YYYYMM string), ``week``
        ('0'-'5') and ``category`` (short category code) attributes.
    """
    parser = argparse.ArgumentParser(description="从arXiv获取计算机科学AI、CL、LG等类别的论文信息。")
    parser.add_argument("-m", "--month", required=True, help="指定月份，格式为YYYYMM")
    # choices: reject out-of-range weeks up front; previously an invalid week
    # crashed later inside build_search_query with an opaque KeyError.
    parser.add_argument("-w", "--week", required=True, choices=['0', '1', '2', '3', '4', '5'],
                        help="指定周，1-5代表每月的第几周，0代表整月")
    # required: omitting the category used to silently build the broken
    # query "cat:None" in build_search_query.
    parser.add_argument("-c", "--category", required=True,
                        choices=['ai', 'cv', 'ro', 'lg', 'ma', 'cl'],
                        help="指定类别，可选值为：ai(Artificial Intelligence), cv(Computer Vision and Pattern Recognition), ro(Robotics), lg(Machine Learning),ma(Multiagent Systems), cl(Computation and Language)")
    return parser.parse_args()

# Build the arXiv API search query string.
def build_search_query(category, month, week):
    """Build an arXiv API ``search_query`` for one category and date window.

    Args:
        category: Short code ('ai', 'cv', 'ro', 'lg', 'ma', 'cl'); unknown
            or missing codes fall back to cs.AI.
        month: Month as a 'YYYYMM' string.
        week: '1'-'5' for a week of the month, '0' for the whole month.

    Returns:
        The query string, e.g.
        "(cat:cs.AI) AND submittedDate:[20240101000000 TO 20240107235959]".

    Raises:
        ValueError: If *week* is not one of '0'-'5'.
    """
    category_codes = {
        'ai': 'cs.AI',
        'cv': 'cs.CV',
        'ro': 'cs.RO',
        'lg': 'cs.LG',
        'ma': 'cs.MA',
        'cl': 'cs.CL'
    }
    # Default to cs.AI: previously a missing/unknown category produced the
    # broken query fragment "cat:None".
    category_code = category_codes.get(category, 'cs.AI')

    # Inclusive day-of-month ranges per week; '0' covers the whole month.
    week_ranges = {
        "1": ("01", "07"),
        "2": ("08", "14"),
        "3": ("15", "21"),
        "4": ("22", "28"),
        "5": ("29", "31"),
        "0": ("01", "31")
    }
    try:
        first_day, last_day = week_ranges[week]
    except KeyError:
        # Fail with a clear message instead of a bare KeyError.
        raise ValueError(f"week must be one of 0-5, got {week!r}") from None

    search_query = (f"(cat:{category_code}) AND "
                    f"submittedDate:[{month}{first_day}000000 TO {month}{last_day}235959]")
    print("month=%s, week=%s, search_query=%s" % (month, week, search_query))
    return search_query

def get_subject(category):
    """Map an arXiv primary-category code to a human-readable subject label.

    Returns None for codes outside the handled cs.* categories.
    """
    subjects = {
        'cs.AI': 'cs - Artificial Intelligence',
        'cs.CV': 'cs - Computer Vision and Pattern Recognition',
        'cs.RO': 'cs - Robotics',
        'cs.LG': 'cs - Machine Learning',
        'cs.MA': 'cs - Multiagent Systems',
        'cs.CL': 'cs - Computation and Language',
    }
    return subjects.get(category)

# Query the arXiv API and parse the XML response.
def fetch_papers(search_query, start, max_results):
    """Fetch one page of results from the arXiv API.

    Args:
        search_query: Query string as produced by build_search_query().
        start: Zero-based result offset for paging.
        max_results: Page size (number of entries to request).

    Returns:
        The parsed Atom feed root Element, or None on a non-200 response.
    """
    # arXiv API base URL.
    base_url = "http://export.arxiv.org/api/query"
    params = {
        "search_query": search_query,
        "start": start,
        "max_results": max_results,
        "sortBy": "submittedDate",
        "sortOrder": "descending"
    }

    # timeout: without one, a stalled connection blocks the script forever.
    response = requests.get(base_url, params=params, timeout=30)
    if response.status_code == 200:
        return ET.fromstring(response.text)
    else:
        print(f"请求失败，状态码：{response.status_code}")
        return None

# Convert arXiv Atom entries into CSL JSON records.

# XML namespace prefixes used throughout the arXiv API response.
_ATOM_NS = '{http://www.w3.org/2005/Atom}'
_ARXIV_NS = '{http://arxiv.org/schemas/atom}'


def _csl_authors(entry):
    """Build the CSL JSON 'author' list (family/given/affiliation) for one entry."""
    authors = []
    for author_elem in entry.findall(_ATOM_NS + 'author'):
        name = author_elem.find(_ATOM_NS + 'name').text
        affiliation_elem = author_elem.find(_ARXIV_NS + 'affiliation')
        affiliation = affiliation_elem.text if affiliation_elem is not None else ''
        # Last whitespace-separated token is treated as the family name;
        # everything before it as the given name(s).
        name_parts = name.split()
        authors.append({
            'family': name_parts[-1] if name_parts else '',
            'given': ' '.join(name_parts[:-1]) if len(name_parts) > 1 else '',
            'affiliation': [{'name': affiliation}] if affiliation else []
        })
    return authors


def convert_to_csl_json(root):
    """Convert a parsed arXiv Atom feed into a list of CSL JSON paper dicts.

    Entries whose 'published' field is missing or not an ISO date are
    skipped with a diagnostic message.
    """
    csl_json_papers = []

    for entry in root.findall(_ATOM_NS + 'entry'):
        paper_id = entry.find(_ATOM_NS + 'id').text
        published = entry.find(_ATOM_NS + 'published').text

        # The Atom 'published' value starts with an ISO date (YYYY-MM-DD...).
        # TypeError covers a missing (None) text node, ValueError a bad format.
        try:
            parsed_date = datetime.strptime(published[:10], '%Y-%m-%d')
        except (TypeError, ValueError):
            print(f"Invalid date format for paper {paper_id}: {published}")
            continue

        doi_elem = entry.find(_ARXIV_NS + 'doi')
        pdf_link = entry.find(_ATOM_NS + 'link[@type="application/pdf"]')
        primary_cat = entry.find(_ARXIV_NS + 'primary_category').attrib['term']

        csl_json_papers.append({
            'id': paper_id,
            'title': entry.find(_ATOM_NS + 'title').text,
            'author': _csl_authors(entry),
            'issued': {'date-parts': [[parsed_date.year, parsed_date.month, parsed_date.day]]},
            'DOI': doi_elem.text if doi_elem is not None else '',
            'URL': pdf_link.attrib['href'] if pdf_link is not None else '',
            'abstract': entry.find(_ATOM_NS + 'summary').text,
            'type': 'article-journal',
            'container-title': 'arXiv',
            'publisher': 'arXiv',
            'subject': get_subject(primary_cat),
            'primary_category': primary_cat
        })
    return csl_json_papers

# Persist the CSL JSON paper records to disk.
def write_to_file(filename, csl_json_papers):
    """Dump the paper records to *filename* as pretty-printed UTF-8 JSON."""
    with open(filename, 'w', encoding='utf-8') as output:
        json.dump(csl_json_papers, output, ensure_ascii=False, indent=4)

# Main entry point.
def main():
    """Page through all matching arXiv results and write them as CSL JSON.

    Fetches up to 100 results per request, sleeping between pages, until
    the feed is exhausted, the safety cap is hit, or a request fails.
    """
    all_csl_json_papers = []
    args = parse_arguments()
    search_query = build_search_query(args.category, args.month, args.week)

    page = 0
    start = 0
    max_results_per_query = 100    # arXiv page size per request
    max_total_results = 1000000    # hard safety cap on collected papers
    while len(all_csl_json_papers) < max_total_results:
        page += 1
        print("%i: start to fetch papers..." % page)

        root = fetch_papers(search_query, start, max_results_per_query)
        if root is None:
            # A failed request used to leave `start` unchanged and spin this
            # loop forever with no delay; stop and keep what was collected.
            print("fetch failed, stopping early")
            break

        all_csl_json_papers.extend(convert_to_csl_json(root))

        # Stop when the feed is exhausted or the safety cap is reached.
        total_results = int(root.find('{http://a9.com/-/spec/opensearch/1.1/}totalResults').text)
        start += max_results_per_query
        if start >= total_results or len(all_csl_json_papers) >= max_total_results:
            break

        # Pause between pages to avoid hammering the API and being blocked.
        time.sleep(3)

    filename = f"arxiv-papers-{args.category}-{args.month}-{args.week}.csl.json"
    # Report the actual output file (previously printed a "(unknown)" placeholder).
    print(f"{filename}: {len(all_csl_json_papers)} papers info have gotten\n")
    write_to_file(filename, all_csl_json_papers)


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()

