import sys
import os
import re
import json
import time
import html
import hashlib
from urllib.parse import quote_plus
from urllib.request import Request, urlopen

USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"

def fetch_page(url):
    """Download *url* and return the response body decoded to text.

    Sends a desktop browser User-Agent and a zh-CN Accept-Language header.
    The declared response charset is used when present, utf-8 otherwise;
    undecodable bytes are dropped rather than raising.
    """
    headers = {
        "User-Agent": USER_AGENT,
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    }
    with urlopen(Request(url, headers=headers), timeout=20) as resp:
        encoding = resp.headers.get_content_charset() or "utf-8"
        body = resp.read()
    return body.decode(encoding, errors="ignore")

def parse_image_urls(html_text):
    """Extract full-resolution image URLs ("murl") from a Bing image-search
    results page.

    Bing embeds per-result metadata as HTML-escaped JSON in the ``m``
    attribute of elements with class ``iusc``.  The attribute is matched on
    the *raw* markup and each captured value is unescaped individually before
    JSON parsing.  (Unescaping the whole page first — as this function
    originally did — turns the ``&quot;`` entities inside the attribute into
    literal quotes, so the ``[^"]+`` match truncates at the first quote and
    the JSON never parses.)

    Falls back to a loose ``"murl"`` regex over the unescaped page when no
    structured matches are found; those raw JSON values carry escaped
    slashes (``\\/``), which are normalized.

    Returns a list of URL strings in page order (possibly empty).
    """
    urls = []
    for m_val in re.findall(r'class="iusc"[^>]*\sm="([^"]+)"', html_text):
        try:
            data = json.loads(html.unescape(m_val))
        except (ValueError, TypeError):
            # Malformed metadata for one result: skip it, keep the rest.
            continue
        u = data.get("murl")
        if u:
            urls.append(u)
    if not urls:
        for u in re.findall(r'"murl":"(.*?)"', html.unescape(html_text)):
            urls.append(u.replace("\\/", "/"))
    return urls

def safe_ext(content_type):
    """Choose a file extension from an HTTP Content-Type header value.

    Substring-matches known image formats in a fixed order; empty, missing,
    or unrecognized types default to ".jpg".
    """
    if not content_type:
        return ".jpg"
    lowered = content_type.lower()
    # Checked in order; first matching marker wins.
    for marker, ext in (
        ("jpeg", ".jpg"),
        ("png", ".png"),
        ("gif", ".gif"),
        ("webp", ".webp"),
        ("bmp", ".bmp"),
    ):
        if marker in lowered:
            return ext
    return ".jpg"

def download_image(url, out_dir, index):
    """Fetch one image URL and save it into *out_dir*.

    The filename combines a zero-padded *index*, a short sha1 of the URL
    (stable dedup key), and an extension derived from the Content-Type.

    Returns the saved file path, or None when the response is not an image
    (nor octet-stream), the body is under 1 KiB, or any error occurs —
    this is a deliberate best-effort download that never raises.
    """
    headers = {"User-Agent": USER_AGENT, "Accept": "image/*,*/*;q=0.8"}
    request = Request(url, headers=headers)
    try:
        with urlopen(request, timeout=30) as resp:
            ctype = resp.headers.get("Content-Type", "")
            if not (ctype.startswith("image/") or "octet-stream" in ctype):
                return None
            payload = resp.read()
            # Reject empty/tiny responses (placeholders, error bodies).
            if len(payload) < 1024:
                return None
            digest = hashlib.sha1(url.encode("utf-8")).hexdigest()[:10]
            filename = f"img_{index:03d}_{digest}{safe_ext(ctype)}"
            dest = os.path.join(out_dir, filename)
            with open(dest, "wb") as fh:
                fh.write(payload)
            return dest
    except Exception:
        # Best-effort: any network/filesystem failure just skips this URL.
        return None

def scrape(query, target_count=200, delay=0.8):
    """Collect up to *target_count* distinct image URLs for *query* from
    Bing's async image-results endpoint, paging 50 results at a time.

    Sleeps *delay* seconds between page fetches (including after a failed
    fetch) to stay polite, and gives up once the paging offset reaches
    10000 even if fewer URLs were found.

    Returns a list of at most *target_count* URLs, first-seen order.
    """
    endpoint = "https://cn.bing.com/images/async"
    encoded = quote_plus(query)
    seen = set()
    results = []
    offset = 0
    while len(results) < target_count and offset < 10000:
        page_url = (
            f"{endpoint}?q={encoded}&first={offset}&count=50&relp=50"
            f"&scenario=ImageBasicHover&datsrc=I&layout=RowBased&mmasync=1"
        )
        try:
            body = fetch_page(page_url)
        except Exception:
            # Transient fetch failure: wait, then move to the next window.
            time.sleep(delay)
            offset += 50
            continue
        for candidate in parse_image_urls(body):
            if candidate in seen:
                continue
            seen.add(candidate)
            results.append(candidate)
            if len(results) >= target_count:
                break
        offset += 50
        time.sleep(delay)
    return results[:target_count]

def main():
    """CLI entry point.

    Scrapes Bing image search for the query given as argv[1] (falling back
    to a default Chinese query), downloads up to 200 images into
    ./bing_images, and writes a "filename, url" manifest to urls.txt.
    """
    query = "各类商品图片"
    if len(sys.argv) >= 2:
        candidate = sys.argv[1].strip()
        if candidate:
            query = candidate

    out_dir = os.path.join(os.getcwd(), "bing_images")
    os.makedirs(out_dir, exist_ok=True)

    # Over-collect URLs (600) since many downloads fail or get filtered.
    candidates = scrape(query, 600)

    saved = []
    serial = 1
    for image_url in candidates:
        saved_path = download_image(image_url, out_dir, serial)
        if saved_path:
            saved.append((saved_path, image_url))
            serial += 1
        time.sleep(0.5)
        if serial > 200:
            break

    manifest = os.path.join(out_dir, "urls.txt")
    with open(manifest, "w", encoding="utf-8") as fh:
        for saved_path, image_url in saved:
            fh.write(f"{os.path.basename(saved_path)}, {image_url}\n")

    print(f"downloaded={len(saved)}")
    print(f"images_dir={out_dir}")
    print(f"url_list={manifest}")

if __name__ == "__main__":
    main()