import os
import json
import requests
import time

from urllib.parse import unquote

def parse_cookie_string(cookie_str):
    """Convert a browser-copied Cookie header string into a dict.

    Example input:
        "Hm_lvt_...=123; JWTUser=%7B...; JSESSIONID=ABC123"

    Example output:
        {
            "Hm_lvt_...": "123",
            "JWTUser": '{"a":1}',  # URL-encoded values are decoded
            "JSESSIONID": "ABC123"
        }

    Non-string or empty input yields an empty dict; items without an
    '=' sign are treated as malformed and skipped.
    """
    result = {}
    if not isinstance(cookie_str, str) or not cookie_str:
        return result

    # Semicolon is the standard separator between cookie pairs
    # (values may contain commas, but never an unescaped semicolon).
    for chunk in cookie_str.split(";"):
        chunk = chunk.strip()
        # Skip empty fragments and malformed items with no '='.
        if "=" not in chunk:
            continue
        # Split on the FIRST '=' only, since values may themselves contain '='.
        name, _, raw_value = chunk.partition("=")
        name = name.strip()
        raw_value = raw_value.strip()
        # Attempt URL decoding (e.g. JWTUser carries URL-encoded JSON).
        try:
            raw_value = unquote(raw_value)
        except Exception:
            pass  # on decode failure, keep the original value
        result[name] = raw_value

    return result


# ====== Configuration ======
SAVE_DIR = "downloaded_pages"
TOTAL_PAGES = 1  # total number of pages to download
BASE_JUMP_URL = "https://drm.lib.pku.edu.cn/jumpServlet"
# Replace with the target document's FID
FID = ""
FILENAME = f"{FID}.pdf"

# Must be replaced with your complete, currently-valid Cookie string
# (copy it from the browser after logging in).
cookies_str =  ""
COOKIES = parse_cookie_string(cookies_str)

HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36 Edg/141.0.0.0",
    "Referer": f"https://drm.lib.pku.edu.cn/pdfindex1.jsp?fid={FID}",
    "X-Requested-With": "XMLHttpRequest",
    "Accept": "*/*",
}

# ====== Create the output directory (no-op if it already exists) ======
os.makedirs(SAVE_DIR, exist_ok=True)

# ====== Step 1: collect the image URL for every page ======
print(f"[*] 开始收集 {TOTAL_PAGES} 页的图片链接...")

collected_pages = {}  # {page number (int): image URL (str)}
page_index = 0
# Cap the total number of requests so a server that never yields new
# pages cannot keep this loop running forever.
max_request_attempts = TOTAL_PAGES + 50

while len(collected_pages) < TOTAL_PAGES and page_index < max_request_attempts:
    print(f"[*] 请求 jumpServlet?page={page_index}")

    params = {
        "page": page_index,
        "fid": FID,
        "userid": "",
        "filename": FILENAME,
        "visitid": ""
    }

    try:
        resp = requests.get(
            BASE_JUMP_URL,
            params=params,
            cookies=COOKIES,
            headers=HEADERS,
            timeout=10
        )

        if resp.status_code == 200:
            try:
                data = resp.json()
            # ValueError covers both json.JSONDecodeError and
            # requests.exceptions.JSONDecodeError, whichever this
            # requests/simplejson combination actually raises.
            except ValueError:
                print(f"    [!] 返回内容不是 JSON，跳过 page={page_index}")
                page_index += 1
                time.sleep(0.5)
                continue

            new_count = 0
            for item in data.get("list", []):
                try:
                    page_id = int(item["id"])
                    img_url = item["src"]
                except (KeyError, ValueError, TypeError):
                    continue

                # Keep only unseen pages in [0, TOTAL_PAGES): the previous
                # `<= TOTAL_PAGES` bound admitted one id past the range that
                # Step 2 downloads, which could satisfy the "collected enough"
                # count while a page in range was still missing.
                if 0 <= page_id < TOTAL_PAGES and page_id not in collected_pages:
                    collected_pages[page_id] = img_url
                    new_count += 1

            print(f"    -> 新增 {new_count} 页，当前共 {len(collected_pages)} 页")

            # Stop early once every page URL has been collected.
            if len(collected_pages) >= TOTAL_PAGES:
                break
        else:
            print(f"    [!] 请求失败，状态码: {resp.status_code}")

    except Exception as e:
        print(f"    [!] 请求异常: {e}")

    page_index += 1
    time.sleep(0.5)  # politeness delay to avoid getting banned

# ====== Verify that the collection is complete ======
# range(TOTAL_PAGES) covers pages 0 .. TOTAL_PAGES-1; the original
# range(0, TOTAL_PAGES - 1) stopped one page short (and was empty for
# TOTAL_PAGES == 1, so missing pages were never reported).
missing = [i for i in range(TOTAL_PAGES) if i not in collected_pages]
if missing:
    print(f"[!] 缺少以下页码：{missing}")
else:
    print(f"[+] 所有 {TOTAL_PAGES} 页链接已收集完毕！")

# ====== Step 2: download the images in page order ======
print("[*] 开始下载图片...")

# range(TOTAL_PAGES) covers pages 0 .. TOTAL_PAGES-1; the original
# range(0, TOTAL_PAGES - 1) skipped the last page entirely and downloaded
# nothing at all when TOTAL_PAGES == 1.
for page_num in range(TOTAL_PAGES):
    if page_num not in collected_pages:
        print(f"[-] 跳过 page_{page_num:03d}：无链接")
        continue

    img_url = collected_pages[page_num]
    filename = f"page_{page_num:03d}.png"
    filepath = os.path.join(SAVE_DIR, filename)

    # Log the actual filename instead of the "(unknown)" placeholder.
    print(f"[*] 正在下载 {filename} ...")

    try:
        # The image host also checks Referer and Cookie, so send both.
        img_resp = requests.get(
            img_url,
            cookies=COOKIES,
            headers={
                "Referer": f"https://drm.lib.pku.edu.cn/pdfindex1.jsp?fid={FID}",
                "User-Agent": HEADERS["User-Agent"]
            },
            timeout=15
        )

        if img_resp.status_code == 200:
            with open(filepath, "wb") as f:
                f.write(img_resp.content)
            print(f"[+] 已保存: {filepath}")
        else:
            print(f"[-] 下载失败 {filename}，状态码: {img_resp.status_code}")

    except Exception as e:
        print(f"[-] 下载异常 {filename}: {e}")

    time.sleep(0.3)  # pacing between downloads

print("[*] 全部完成！")
