import requests
import os
from urllib.parse import urljoin, urlparse
import json
import argparse
import time
from urllib.robotparser import RobotFileParser
from urllib.error import HTTPError

# Running progress counters shared with main(): number of records processed
# so far, and the total record count reported by the API (starts at 1 so the
# first page is always fetched).
global_index = 0
global_total = 1
# Per-domain robots.txt cache: maps "scheme://netloc" to either a parsed
# RobotFileParser instance or a plain bool verdict (fetch failed / missing).
robots_cache = {}

def is_allowed(url, user_agent):
    """Return whether robots.txt of *url*'s host permits *user_agent* to fetch it.

    Results are memoized per domain in the module-level ``robots_cache``:
    the cached value is either a RobotFileParser (robots.txt parsed OK) or a
    plain bool verdict when the file could not be fetched.
    """
    parsed_url = urlparse(url)
    domain = f"{parsed_url.scheme}://{parsed_url.netloc}"

    if domain not in robots_cache:
        rp = RobotFileParser()
        robots_url = f"{domain}/robots.txt"
        # BUG FIX: the parser must be pointed at the robots.txt URL before
        # read(). The original never called set_url(), so read() always
        # failed and every domain fell through to the catch-all "allow" path.
        rp.set_url(robots_url)
        try:
            rp.read()  # fetches and parses robots.txt
            robots_cache[domain] = rp
        except HTTPError as e:
            # A missing robots.txt means "everything is allowed".
            if e.code == 404:
                robots_cache[domain] = True
            else:
                print(f"robots.txt访问异常 [{robots_url}]: HTTP {e.code}")
                robots_cache[domain] = False
        except Exception as e:
            # Network or parse failure: default to allowing access.
            print(f"robots.txt解析失败 [{robots_url}]: {str(e)}")
            robots_cache[domain] = True

    cached = robots_cache.get(domain)

    if isinstance(cached, RobotFileParser):
        return cached.can_fetch(user_agent, url)
    return cached

def get_json_data(url, headers=None):
    """POST to *url* and return the decoded JSON body, or None on any failure.

    Failures (HTTP errors, timeouts, invalid JSON) are reported to stdout
    rather than raised, so callers only need to check for None.
    """
    try:
        resp = requests.post(url, headers=headers, timeout=10)
        resp.raise_for_status()
        payload = resp.json()
    except Exception as e:
        print(f"获取JSON数据失败: {e}")
        return None
    return payload


def download_pdf(pdf_url, save_dir, pdf_name, headers=None):
    """Download *pdf_url* into *save_dir*, naming the file after the URL plus *pdf_name*.

    Skips the download when the target file already exists. Returns a result
    dict: {"status": "success", "filename", "url"} on success (or skip), or
    {"status": "error", "url", "error"} on failure.

    pdf_name may be None (the caller pulls it from an optional JSON field);
    it is treated as an empty suffix in that case.
    """
    # Derive a base name from the URL path.
    filename = os.path.basename(urlparse(pdf_url).path)
    if not filename:
        filename = "unknown.pdf"
    else:
        # Defensive: strip anything after a literal '?' in the path segment.
        filename = filename.split("?")[0]

    # BUG FIX: the original appended pdf_name AFTER the extension, producing
    # names like "abc.pdfTitle.pdf", and crashed with TypeError when
    # pdf_name was None. Strip the extension first, tolerate None, and
    # neutralize path separators so the title cannot escape save_dir.
    stem = os.path.splitext(filename)[0]
    title = (pdf_name or "").replace("/", "_").replace("\\", "_")
    filename = stem + title + ".pdf"

    file_path = os.path.join(save_dir, filename)
    if os.path.exists(file_path):
        print(f"文件已存在: {file_path}")
        return {"status": "success", "filename": filename, "url": pdf_url}

    try:
        response = requests.get(pdf_url, headers=headers, timeout=10)
        response.raise_for_status()

        # Create the target directory only once a download actually succeeds.
        os.makedirs(save_dir, exist_ok=True)

        with open(file_path, 'wb') as f:
            f.write(response.content)
        return {"status": "success", "filename": filename, "url": pdf_url}
    except Exception as e:
        print(f"下载失败 [{pdf_url}]: {e}")
        return {"status": "error", "url": pdf_url, "error": str(e)}


def main(api_url, prefix, save_dir="pdfs", field_name="adjunctUrl"):
    """Page through the JSON API at *api_url*, download every referenced PDF.

    Each record's *field_name* value is joined onto *prefix* to form the PDF
    URL. Download outcomes are collected and written to
    ``<save_dir>/results.json`` when paging finishes (or aborts).
    """
    # Default request headers (desktop Chrome UA).
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
    }
    # Respect robots.txt for the API endpoint itself.
    if not is_allowed(api_url, headers['User-Agent']):
        print(f"根据robots.txt规则，禁止访问API地址: {api_url}")
        return

    global global_index
    global global_total
    # BUG FIX: reset the shared counters so a second call to main() does not
    # see stale values from a previous run and skip the loop entirely.
    global_index = 0
    global_total = 1

    pageNum = 1
    results = []
    while global_index < global_total:
        print(f"当前索引为: {global_index}")
        new_api_url = api_url + "&pageNum=" + str(pageNum)
        pageNum = pageNum + 1
        print(f"正在处理 {new_api_url}")
        # Fetch one page of records.
        json_data = get_json_data(new_api_url, headers=headers)
        if not json_data:
            # BUG FIX: break instead of return, so results gathered so far
            # are still written to disk below.
            break

        global_total = json_data["totalAnnouncement"]
        print(f"共{global_total}条记录")

        pdfin = json_data['announcements']
        if not pdfin:
            # BUG FIX: an empty page with global_index < global_total would
            # otherwise loop forever, incrementing pageNum.
            break
        for index, item in enumerate(pdfin, 1):
            pdf_path = item.get(field_name)
            pdf_name = item.get("secName")
            if not pdf_path:
                print(f"记录 {index} 缺少字段 '{field_name}'")
                continue

            # Join the relative path onto the configured prefix.
            full_url = urljoin(prefix, pdf_path)
            print(f"正在处理 ({global_index}/{global_total} - {index}/{len(pdfin)}) {full_url}")

            result = download_pdf(full_url, save_dir, pdf_name, headers=headers)
            results.append(result)
            global_index = global_index + 1
            time.sleep(1)  # be polite to the server

        time.sleep(1)

    # BUG FIX: download_pdf only creates save_dir after a successful fetch,
    # so ensure it exists before writing the summary.
    os.makedirs(save_dir, exist_ok=True)
    with open(os.path.join(save_dir, 'results.json'), 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)
    print("done, return")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="PDF下载工具")
    # BUG FIX: both arguments are mandatory — without required=True argparse
    # passes None into main(), which then crashes deep inside the crawl.
    parser.add_argument("--api_url", required=True, help="返回JSON数据的API地址")
    parser.add_argument("--prefix", required=True, help="PDF地址前缀")
    parser.add_argument("--save-dir", default="pdfs", help="文件保存目录（默认：pdfs）")
    # BUG FIX: help text claimed the default was "pdf_path"; it is "adjunctUrl".
    parser.add_argument("--field", default="adjunctUrl", help="PDF路径字段名（默认：adjunctUrl）")

    args = parser.parse_args()

    main(
        api_url=args.api_url,
        prefix=args.prefix,
        save_dir=args.save_dir,
        field_name=args.field,
    )

