""" import requests
from bs4 import BeautifulSoup

# 目标网页地址
url = "https://image.baidu.com/search/index?ct=201326592&z=2&tn=baiduimage&word=欧美电影海报&pn=&spn=&ie=utf-8&oe=utf-8&cl=2&lm=-1&fr=&se=&sme=&width=0&height=0&cs=&os=&objurl=&di=&gsm=5a&dyTabStr=MCwxMiwzLDEsMiwxMyw3LDYsNSw5"

# 设置请求头模拟浏览器访问（重要！避免被反爬）
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
}

# 发送HTTP请求获取网页内容
response = requests.get(url, headers=headers)
response.encoding = "utf-8"  # 设置编码防止乱码

# 使用BeautifulSoup解析HTML
soup = BeautifulSoup(response.text, "html.parser")

print(soup.prettify())
# 通过class定位所有目标图片标签
target_imgs = soup.find_all("img", class_="main_img img-hover")

print("共找到", len(target_imgs), "张图片")
# 提取并逐行输出src属性
for img in target_imgs:
    img_src = img.get("data-imgurl")
    if img_src:  # 过滤空链接
        print(img_src) """

import requests
import re
import json
from urllib.parse import unquote

def decode_url(enc_url):
    """Decode a Baidu-obfuscated image URL (the ``objURL`` field).

    Baidu masks real image URLs with a simple substitution cipher:
    multi-character tokens stand for ``:``, ``.`` and ``/``, and the
    remaining characters are permuted by a fixed lookup table.

    Parameters
    ----------
    enc_url : str
        Obfuscated (and possibly percent-encoded) URL from the API response.

    Returns
    -------
    str
        The decoded real image URL. Characters outside the cipher
        alphabet pass through unchanged.
    """
    # Undo percent-encoding first, then the multi-character tokens —
    # they must be replaced before the per-character pass, otherwise
    # the per-character table would mangle the token characters.
    dec_url = unquote(enc_url)
    for token, plain in (('_z2C$q', ':'), ('_z&e3B', '.'), ('AzdH3F', '/')):
        dec_url = dec_url.replace(token, plain)
    # Fixed per-character substitution table used by Baidu's obfuscation
    # (e.g. encoded 'ippr' -> 'http'). Unknown chars are left untouched.
    char_table = str.maketrans({
        'w': 'a', 'k': 'b', 'v': 'c', '1': 'd', 'j': 'e', 'u': 'f',
        '2': 'g', 'i': 'h', 't': 'i', '3': 'j', 'h': 'k', 's': 'l',
        '4': 'm', 'g': 'n', '5': 'o', 'r': 'p', 'q': 'q', '6': 'r',
        'f': 's', 'p': 't', '7': 'u', 'e': 'v', 'o': 'w', '8': '1',
        'd': '2', 'n': '3', '9': '4', 'c': '5', 'm': '6', '0': '7',
        'b': '8', 'l': '9', 'a': '0',
    })
    return dec_url.translate(char_table)

# Mobile-browser request headers: the mobile endpoint applies weaker
# anti-scraping checks than the desktop page.
headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1",
    "Referer": "https://image.baidu.com/"
}

# Build the real JSON data endpoint (the one the page itself polls).
search_word = "欧美电影海报"
url = f"https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&word={search_word}&pn=0&rn=30"

# Fetch the (lightly obfuscated) JSON payload.
# A timeout prevents the script from hanging forever on a stalled
# connection; raise_for_status fails fast on HTTP-level errors.
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
# Baidu's payload contains invalid \' escape sequences that break
# json.loads — strip them before parsing.
data = json.loads(response.text.replace(r"\'", "'"))

# Extract and decode the real image URLs.
image_urls = []
# .get guards against error payloads with no 'data' key; the list's
# last element is typically an empty dict, filtered by the checks below.
for item in data.get('data', []):
    if 'replaceUrl' in item and len(item['replaceUrl']) > 1:
        enc_url = item['replaceUrl'][1]['ObjURL']
        real_url = decode_url(enc_url)
        image_urls.append(real_url)

# Report the results.
print(f"成功解析到 {len(image_urls)} 张图片真实地址：")
for url in image_urls:
    print(url)