
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
依赖库：pip install requests
精易论坛2025开源大赛(第八届)
"""
import os
import time
import requests
import urllib3
from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib.parse import unquote

# ========== Only kwssectoken needs replacing (it expires every 30 minutes) ==========
COOKIE = (
    "kpf=PC_WEB; kpn=KUAISHOU_VISION; clientid=3; "
    "did=web_667e562e10cfdb06d4866a5bddd65382; "
    "kwpsecproductname=kuaishou-vision; "
    " kwssectoken=Xzeu1hxaf8iXrBD/hcYx7O2+aQ9IrP9TnkASjuDtzSCy+0jAtdG+zEeqCFwGMRgw4kuWVAYKUblATwPK2Z7yWQ==; "
    "kwscode=47ab2e018ffd9a5082f18bdb7573fdbd8685d52802237835ce948df0fea3c848"
)
SAVE_DIR = "ks_mp4"          # folder where downloaded .mp4 files are stored
MAX_WORKER = 6               # download thread-pool size
PER_BATCH = 30               # number of videos queued before a batch download fires
RETRY = 3                    # retry attempts per failed download

# Silence InsecureRequestWarning — downloads below use verify=False
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# GraphQL endpoint; HEADERS imitates a desktop Chrome/Edge browser session.
URL = "https://www.kuaishou.com/graphql"
HEADERS = {
    "Origin": "https://www.kuaishou.com",
    "Referer": "https://www.kuaishou.com/brilliant",
    "X-Requested-With": "XMLHttpRequest",
    "User-Agent": ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                   "(KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0"),
    "Cookie": COOKIE
}

# GraphQL query for the "brilliant" (hot) feed. The photo field is a union,
# hence the two inline fragments with identical selections. This string is
# sent verbatim to the server — do not reformat it.
QUERY = """
query brilliantTypeDataQuery(
  $pcursor: String, $hotChannelId: String, $page: String, $webPageArea: String
) {
  brilliantTypeData(
    pcursor: $pcursor, hotChannelId: $hotChannelId,
    page: $page, webPageArea: $webPageArea
  ) {
    result
    pcursor
    feeds {
      type
      author { id name }
      photo {
        __typename
        ... on PhotoEntity {
          id caption photoUrl coverUrl duration likeCount viewCount
        }
        ... on recoPhotoEntity {
          id caption photoUrl coverUrl duration likeCount viewCount
        }
      }
    }
  }
}
"""

# Base variables for the query above; "pcursor" is overwritten each page
# with the cursor returned by the previous response.
VARS = {
    "hotChannelId": "00",
    "page": "brilliant",
    "pcursor": "",
    "webPageArea": "hot_channel_version_b"
}

def safe_name(name: str) -> str:
    """Sanitize *name* into a filesystem-safe base filename (max 80 chars).

    Every character that is not alphanumeric or one of " ", "-", "_", "#"
    is replaced with "_". The result is truncated to 80 characters and
    trailing whitespace is stripped *after* truncation, so the cut can
    never leave a dangling space (the original stripped before slicing).
    Falls back to "untitled" when nothing printable remains, so the caller
    never builds a bare ".mp4" filename.
    """
    cleaned = "".join(
        c if c.isalnum() or c in (" ", "-", "_", "#") else "_" for c in name
    )
    return cleaned[:80].rstrip() or "untitled"


def download_one(item: dict):
    """Download one video described by *item* ({"title": str, "mp4": url}).

    Resolves redirects with a HEAD request, then streams the body into
    SAVE_DIR. The payload is written to a temporary ".part" file and
    renamed into place only on success, so a failed or interrupted attempt
    can never leave a half-written .mp4 that the existence check below
    would wrongly treat as already complete on the next run.

    Returns a "[save]" / "[skip]" / "[fail]" status string for logging.
    """
    url, title = item["mp4"], item["title"]
    fname = f"{safe_name(title)}.mp4"
    path = os.path.join(SAVE_DIR, fname)
    if os.path.exists(path):
        return f"[skip] {fname}"

    tmp_path = path + ".part"
    headers = {"Referer": "https://www.kuaishou.com/"}
    # Close the session deterministically instead of leaking one per call.
    with requests.Session() as sess:
        sess.headers.update(headers)

        for attempt in range(1, RETRY + 1):
            try:
                # verify=False matches the streaming GET below (warnings are
                # globally silenced at module import time).
                resp = sess.head(url, allow_redirects=True, timeout=10,
                                 verify=False)
                real_url = resp.url
                with sess.get(real_url, headers=headers, stream=True, timeout=60,
                              verify=False, allow_redirects=True) as r:
                    r.raise_for_status()
                    with open(tmp_path, "wb") as f:
                        for chunk in r.iter_content(chunk_size=1024 * 64):
                            if chunk:
                                f.write(chunk)
                # Atomic publish: the final path only ever holds a full file.
                os.replace(tmp_path, path)
                return f"[save] {path}"
            except requests.exceptions.SSLError:
                print(f"[warn] SSL EOF on {fname}  attempt={attempt}")
                time.sleep(2 * attempt)
            except Exception as e:
                print(f"[warn] download fail  attempt={attempt}  {e}")
                time.sleep(2 * attempt)
            finally:
                # Drop any leftover partial so a later run retries it.
                if os.path.exists(tmp_path):
                    try:
                        os.remove(tmp_path)
                    except OSError:
                        pass
    return f"[fail] {fname}  after {RETRY} retries"


def batch_download(mp4_list):
    """Download every item in *mp4_list* concurrently, printing each result
    as soon as its worker finishes (completion order, not submission order)."""
    with ThreadPoolExecutor(MAX_WORKER) as pool:
        pending = {pool.submit(download_one, entry) for entry in mp4_list}
        for done in as_completed(pending):
            print(done.result())

def crawl():
    """Page through Kuaishou's "brilliant" feed and download each video.

    Repeatedly POSTs the GraphQL query, collects (title, mp4-url) pairs and
    hands them to batch_download() in chunks of PER_BATCH, following the
    returned pcursor until the API reports "no_more" or stops sending data.
    """
    os.makedirs(SAVE_DIR, exist_ok=True)
    pcursor = ""
    page_no = 1
    # Close the HTTP session deterministically when crawling ends.
    with requests.Session() as sess:
        sess.headers.update(HEADERS)

        while True:
            print(f"\n>>> 第 {page_no} 页 pcursor={pcursor[:20]}...")
            variables = VARS.copy()
            variables["pcursor"] = pcursor

            # A timeout keeps a stalled socket from hanging the crawler forever.
            resp = sess.post(URL,
                             json={"operationName": "brilliantTypeDataQuery",
                                   "query": QUERY,
                                   "variables": variables},
                             timeout=15)
            if resp.status_code != 200:
                print("[!] non-200 response", resp.text[:200])
                break

            try:
                payload = resp.json()["data"]["brilliantTypeData"]
            except (ValueError, KeyError, TypeError):
                # Non-JSON body, or a GraphQL error envelope ("data": null)
                # — previously this crashed with an unhandled exception.
                print("[!] unexpected response shape", resp.text[:200])
                break
            feeds = payload.get("feeds") or []
            pcursor = payload.get("pcursor") or ""

            if not feeds:
                print("[*] no more data")
                break

            mp4_list = []
            for feed in feeds:
                photo = feed.get("photo")
                if not photo:
                    continue
                # "or" also covers an explicit null caption, not just a
                # missing key, so safe_name() never receives None.
                title = photo.get("caption") or "无标题"
                mp4 = photo.get("photoUrl") or photo.get("photoH265Url")
                if not mp4:
                    continue
                print(title)
                mp4_list.append({"title": title, "mp4": unquote(mp4)})
                if len(mp4_list) >= PER_BATCH:
                    batch_download(mp4_list)
                    mp4_list.clear()
            if mp4_list:
                batch_download(mp4_list)

            if pcursor == "no_more":
                print("[*] reach end")
                break
            time.sleep(1)   # polite delay between pages
            page_no += 1


# Script entry point: start crawling when run directly.
if __name__ == "__main__":
    crawl()