# coding: utf-8
"""
download_heroes_with_ddg.py
用途：优先从维基百科获取人物主图；若网络访问维基失败或返回空，则调用 DuckDuckGo 搜图接口抓第一张结果作为后备。
输出：按 “001.jpg、002.jpg…” 保存。
"""

import os
import time
import requests
from requests.exceptions import RequestException

# duckduckgo_search >= 2.x exposes the ddg_images helper; keep the import
# optional so the script still runs (wiki-only) when the package is missing.
try:
    from duckduckgo_search import ddg_images
except ImportError:
    ddg_images = None

# The duckduckgo-images-api package is a second, optional fallback provider.
try:
    from duckduckgo_images_api import search as ddg_api_search
except ImportError:
    ddg_api_search = None

# (code, name) pairs; the code becomes the output filename ("001.jpg", ...).
HERO_NAMES = [
    ("001", "杨靖宇"),
    ("002", "赵一曼"),
    ("003", "左权"),
    ("004", "张自忠"),
    ("005", "戴安澜"),
    ("006", "彭雪枫"),
    ("007", "李兆麟"),
    ("008", "彭德怀"),
    ("009", "马本斋"),
    ("010", "谢子长"),
    ("011", "邓铁梅"),
    ("012", "周保中"),
    ("013", "王二小"),
    ("014", "陈树湘"),
    ("015", "杨成武"),
    ("016", "冯仲云"),
]

API_ENDPOINT = "https://zh.wikipedia.org/w/api.php"  # zh-Wikipedia MediaWiki API
OUTPUT_DIR = "assets/heroes"  # downloaded images land here
LOG_FILE = "download_with_ddg_log.txt"  # per-hero progress/result log
SLEEP = 0.5  # polite delay (seconds) between consecutive API requests

# Ensure the output directory exists before any download is attempted.
os.makedirs(OUTPUT_DIR, exist_ok=True)

def safe_get(url, params=None, headers=None, retries=3, sleep_sec=1):
    """GET *url* with retries; return the Response on success, else None.

    A response counts as success only when the status code is 200 and the
    body is non-empty.  Transient network errors are swallowed and retried.

    Args:
        url: Target URL.
        params: Optional query parameters forwarded to requests.get.
        headers: Optional headers; a browser-like default is used when None.
        retries: Total number of attempts before giving up.
        sleep_sec: Delay in seconds between attempts (none after the last).

    Returns:
        The requests.Response on success, or None after all attempts fail.
    """
    if headers is None:
        # Browser-like UA: some endpoints reject the default python-requests UA.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                          "AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/118.0.0.0 Safari/537.36",
            "Accept": "application/json, text/plain, */*",
        }
    for attempt in range(retries):
        try:
            r = requests.get(url, params=params, headers=headers, timeout=15, verify=True)
            # Check r.content, not r.text: this function also fetches binary
            # image data, and .text would pointlessly decode megabytes of
            # bytes just to test for emptiness.
            if r.status_code == 200 and r.content:
                return r
        except RequestException:
            # Best-effort: transient network errors are expected; retry below.
            pass
        if attempt < retries - 1:
            # No point sleeping after the final failed attempt.
            time.sleep(sleep_sec)
    return None

def get_wiki_main_image(title):
    """Ask the zh-Wikipedia API for the original "page image" of *title*.

    Returns the direct image URL, or None when the request fails, the JSON
    cannot be parsed, or the page carries no main image.
    """
    query = {
        "action": "query",
        "format": "json",
        "titles": title,
        "prop": "pageimages",
        "piprop": "original",
    }
    resp = safe_get(API_ENDPOINT, params=query)
    if resp is None:
        return None
    try:
        payload = resp.json()
    except ValueError:
        return None
    for page in payload.get("query", {}).get("pages", {}).values():
        source = (page.get("original") or {}).get("source")
        if source:
            return source
    return None

def get_wiki_via_search(title):
    """Fall back to full-text search: search -> page images -> image URL.

    Searches zh-Wikipedia for *title*, then walks the images embedded in the
    top hits and returns the direct URL of the first raster image found
    (.jpg/.jpeg/.png/.gif), or None when nothing resolves.

    Fix over the previous version: if an image's imageinfo record carries no
    "url" field, we now move on to the next candidate image instead of
    aborting the whole search with None.
    """
    data = _wiki_api_json({
        "action": "query",
        "format": "json",
        "list": "search",
        "srsearch": title,
        "srlimit": 3,
    })
    if data is None:
        return None
    for hit in data.get("query", {}).get("search", []):
        url = _first_image_url_of_page(hit.get("title"))
        if url:
            return url
    return None


def _wiki_api_json(params):
    """GET the MediaWiki API with *params*; return parsed JSON or None."""
    r = safe_get(API_ENDPOINT, params=params)
    if not r:
        return None
    try:
        return r.json()
    except ValueError:
        return None


def _first_image_url_of_page(page_title):
    """Return the URL of the first raster image embedded in *page_title*."""
    if not page_title:
        return None
    js = _wiki_api_json({
        "action": "query",
        "format": "json",
        "titles": page_title,
        "prop": "images",
        "imlimit": 10,
    })
    if js is None:
        return None
    for page in js.get("query", {}).get("pages", {}).values():
        for img in page.get("images", []):
            img_title = img.get("title")
            if not img_title:
                continue
            # Only raster formats are wanted; this also skips SVG icons.
            if not img_title.lower().endswith((".jpg", ".jpeg", ".png", ".gif")):
                continue
            url = _resolve_image_url(img_title)
            if url:
                return url
    return None


def _resolve_image_url(img_title):
    """Resolve a 'File:...' title to its direct download URL, or None."""
    js = _wiki_api_json({
        "action": "query",
        "format": "json",
        "titles": img_title,
        "prop": "imageinfo",
        "iiprop": "url",
    })
    if js is None:
        return None
    for page in js.get("query", {}).get("pages", {}).values():
        info = page.get("imageinfo")
        if info:
            return info[0].get("url")
    return None

def get_ddg_image(title):
    """Return the first DuckDuckGo image-search hit for *title*, or None.

    Two optional providers are tried in order; each failure (missing
    package, network error, empty result) silently falls through to the
    next, since this is a best-effort last resort.
    """
    # Provider 1: duckduckgo_search.ddg_images returns a list of result dicts.
    if ddg_images:
        try:
            hits = ddg_images(title, region="wt-wt", safesearch="Moderate", max_results=1)
        except Exception:
            hits = None
        if hits:
            top = hits[0]
            candidate = top.get("image") or top.get("thumbnail") or top.get("url")
            if candidate:
                return candidate
    # Provider 2: duckduckgo_images_api returns {"results": [{"url": ...}, ...]}.
    if ddg_api_search:
        try:
            entries = ddg_api_search(title).get("results")
        except Exception:
            entries = None
        if entries:
            return entries[0].get("url")
    return None

def download_url_to(path, url):
    """Download *url* and write the raw bytes to *path*.

    Args:
        path: Destination file path (written in binary mode).
        url: Image URL to fetch.

    Returns:
        True on success, False when the fetch or the file write fails.
    """
    r = safe_get(url, retries=2, sleep_sec=1)
    if not r:
        return False
    try:
        with open(path, "wb") as f:
            f.write(r.content)
    except OSError:
        # Only disk/permission problems are expected here; anything else
        # would be a programming error and should surface, so no broad
        # `except Exception` any more.
        return False
    return True

def main():
    """Download one portrait per hero, logging every step to LOG_FILE."""
    succ = []
    fail = []
    # Image sources in priority order: wiki main image, wiki search, DuckDuckGo.
    fetchers = (get_wiki_main_image, get_wiki_via_search, get_ddg_image)
    with open(LOG_FILE, "w", encoding="utf-8") as log:
        for code, name in HERO_NAMES:
            out = os.path.join(OUTPUT_DIR, f"{code}.jpg")
            log.write(f"=== {code} {name} ===\n")
            print(f"Processing {code} {name} …")
            img_url = None
            for fetch in fetchers:
                img_url = fetch(name)
                time.sleep(SLEEP)  # stay polite between API calls
                if img_url:
                    break
            if not img_url:
                log.write("No image URL\n\n")
                fail.append((code, name, None))
                print(f"No image found for {name}")
                continue
            log.write(f"Found URL: {img_url}\n")
            if download_url_to(out, img_url):
                log.write(f"Saved to {out}\n\n")
                succ.append((code, name, img_url))
                print(f"Saved {out}")
            else:
                log.write("Download failed\n\n")
                fail.append((code, name, img_url))
                print(f"Download failed for {name}")
    print("Done. Success:", len(succ), "Fail:", len(fail))
    print("See log:", LOG_FILE)

if __name__ == "__main__":
    main()

