import requests
import os
from urllib.parse import urljoin, urlparse
import json
import argparse
import time
from urllib.robotparser import RobotFileParser
from urllib.error import HTTPError

def download_pdf(pdf_url, save_dir, pdf_name, headers=None):
    """Download a PDF from *pdf_url* and save it as *pdf_name* under *save_dir*.

    Skips the download when the target file already exists.

    Args:
        pdf_url: Absolute URL of the PDF to fetch.
        save_dir: Directory to save into (created on demand).
        pdf_name: File name to save the PDF under.
        headers: Optional HTTP headers forwarded to ``requests.get``.

    Returns:
        dict: ``{"status": "success", "filename": ..., "url": ...}`` on
        success (or when the file already exists), or
        ``{"status": "error", "url": ..., "error": ...}`` on any failure.
    """
    file_path = os.path.join(save_dir, pdf_name)
    if os.path.exists(file_path):
        print(f"文件已存在: {file_path}")
        return {"status": "success", "filename": pdf_name, "url": pdf_url}

    # Download to a temporary ".part" file first: if the transfer is
    # interrupted, no partial file is left at the final path that a later
    # run's exists-check would mistake for a completed download.
    tmp_path = file_path + ".part"
    try:
        # stream=True avoids holding the whole PDF in memory at once.
        with requests.get(pdf_url, headers=headers, timeout=10, stream=True) as response:
            response.raise_for_status()

            # Create the save directory only once we know the request succeeded.
            os.makedirs(save_dir, exist_ok=True)

            with open(tmp_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=65536):
                    f.write(chunk)

        os.replace(tmp_path, file_path)  # atomic move on the same filesystem
        return {"status": "success", "filename": pdf_name, "url": pdf_url}
    except Exception as e:
        # Best-effort cleanup of the partial temp file, then report the
        # failure instead of raising (callers iterate over many URLs).
        try:
            os.remove(tmp_path)
        except OSError:
            pass
        print(f"下载失败 [{pdf_url}]: {e}")
        return {"status": "error", "url": pdf_url, "error": str(e)}


def download_from_file(filename, save_dir):
    """Read a JSON manifest and download every PDF it lists.

    The manifest is a JSON array of objects, each carrying a "url" and a
    "filename" key; each entry is handed to ``download_pdf``.
    """
    with open(filename, 'r', encoding='utf-8') as fh:
        entries = json.load(fh)

    for entry in entries:
        outcome = download_pdf(entry["url"], save_dir, entry["filename"])
        print(outcome)
        # Pause between requests so we do not hammer the remote server.
        time.sleep(2)

if __name__ == "__main__":
    # Command-line entry point: parse the output directory and manifest
    # path, then run the batch download.
    cli = argparse.ArgumentParser(description="PDF下载工具")
    cli.add_argument("--save-dir", default="pdfs", help="文件保存目录（默认：pdfs）")
    cli.add_argument("--json-file", default="result.json", help="jsonfile（默认：result.json）")
    opts = cli.parse_args()
    download_from_file(opts.json_file, opts.save_dir)

