import os
import requests
import hashlib
from PIL import Image
import io
import re
from urllib.parse import quote
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def verify_and_process_image(response, filename):
    """Validate an HTTP image response, downscale it, and save it as PNG.

    Args:
        response: a requests Response whose body should be image bytes.
        filename: path the processed PNG is written to.

    Returns:
        True when the image passed all checks and was saved, False otherwise.
    """
    try:
        # Reject responses that are not declared as images.
        content_type = response.headers.get('Content-Type', '')
        if not content_type.startswith('image/'):
            print(f"非图片内容: {content_type}")
            return False

        # Bodies under 1KB are unlikely to be a valid photo.
        if len(response.content) < 1024:
            print(f"内容太小: {len(response.content)} bytes")
            return False

        img = Image.open(io.BytesIO(response.content))

        # Skip tiny images — likely icons or thumbnails.
        if img.size[0] < 100 or img.size[1] < 100:
            print(f"图片尺寸太小: {img.size}")
            return False

        # Shrink in place to fit within 800x600, preserving aspect ratio.
        max_size = (800, 600)
        img.thumbnail(max_size, Image.LANCZOS)

        # Save as PNG. BUGFIX: `quality` is a JPEG option that the PNG
        # writer silently ignores; `optimize=True` is the PNG equivalent
        # (best lossless compression).
        img.save(filename, "PNG", optimize=True)

        # MD5 of the original download, logged for verification/dedup.
        # BUGFIX: log messages printed a literal "(unknown)" instead of
        # interpolating the filename.
        file_hash = hashlib.md5(response.content).hexdigest()
        print(f"图片处理成功: {filename}, Hash: {file_hash}")
        return True
    except Exception as e:
        print(f"图片处理失败 {filename}: {str(e)}")
        print(f"Response headers: {response.headers}")
        return False

def download_image(url, filename, session=None):
    """Download the image at `url`, validate/process it, and save to `filename`.

    Args:
        url: direct URL of the image to fetch.
        filename: destination path for the processed PNG.
        session: optional requests.Session; a retrying one is created if None.

    Returns:
        True on successful download AND validation, False otherwise.
    """
    if session is None:
        session = create_session()

    try:
        # Browser-like headers to reduce the chance of being blocked.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
            "Accept": "image/webp,image/*,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
        }

        # NOTE(security): verify=False disables TLS certificate checks,
        # allowing MITM. Kept for compatibility with image hosts serving
        # bad certificates — confirm whether it can be removed.
        response = session.get(url, headers=headers, verify=False, timeout=15)
        response.raise_for_status()

        if verify_and_process_image(response, filename):
            print(f"成功下载和处理: {filename}")
            return True
        # BUGFIX: the original fell through to `return True` even when
        # validation failed, so callers treated rejected images as success.
        print(f"图片验证失败: {url}")
        return False
    except Exception as e:
        print(f"下载失败 {url}: {str(e)}")
        return False

def create_session():
    """Build a requests.Session that retries transient server errors."""
    retries = Retry(
        total=3,               # at most three retry attempts
        backoff_factor=1,      # exponential back-off between attempts
        status_forcelist=[500, 502, 503, 504],  # HTTP statuses worth retrying
    )
    http_adapter = HTTPAdapter(max_retries=retries)
    new_session = requests.Session()
    for scheme in ("http://", "https://"):
        new_session.mount(scheme, http_adapter)
    return new_session

def search_baidu_images(keyword, mushroom_name=None):
    """Search Baidu Images and return the first URL that downloads and
    validates successfully, or None when every candidate fails.

    Args:
        keyword: primary search query (Chinese).
        mushroom_name: optional English name used to look up curated
            alternative Chinese queries.
    """
    session = create_session()

    # Browser-like headers so the search endpoint serves a normal page.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
        "Referer": "https://image.baidu.com"
    }

    # Primary keyword first, then any curated alternatives for this species.
    search_keywords = [keyword]
    if mushroom_name:
        alternative_keywords = {
            "Fly Agaric": ["红白点蘑菇", "毒蝇蕈", "红蘑菇白点"],
            "Death Cap Mushroom": ["致命鹅膏菌", "毒蝇伞图片"],
            "Destroying Angel": ["白毒伞图片", "毒蝇科白蘑菇"],
            "False Morel": ["假羊肚菌图片", "褶皱菌"]
        }
        search_keywords += alternative_keywords.get(mushroom_name, [])

    for kw in search_keywords:
        try:
            encoded_kw = quote(kw)
            # Walk the first three result pages (10 results apart).
            for pn in (0, 10, 20):
                page_url = f"https://image.baidu.com/search/flip?tn=baiduimage&word={encoded_kw}&pn={pn}"
                response = session.get(page_url, headers=headers, verify=False, timeout=10)
                response.raise_for_status()

                # Image URLs are embedded as "objURL" fields in the page JS.
                candidates = re.findall('"objURL":"(.*?)"', response.text)
                # Drop hosts known to serve unusable results.
                blocked_hosts = ['enterdesk.com']
                candidates = [
                    candidate for candidate in candidates
                    if not any(host in candidate for host in blocked_hosts)
                ]

                # First URL that downloads and validates wins.
                for img_url in candidates:
                    try:
                        if download_image(img_url, "images/temp.png", session):
                            return img_url
                    except Exception:
                        continue
        except Exception as e:
            print(f"搜索失败 {kw}: {str(e)}")
            continue

    return None

# Ensure the output directory exists (idiomatic replacement for the
# exists()/makedirs() pair, and race-free).
os.makedirs('images', exist_ok=True)

# Poisonous mushrooms to fetch, each with a Chinese image-search query.
mushrooms = [
    {"name": "Death Cap Mushroom", "query": "毒蝇伞 蘑菇"},
    {"name": "Destroying Angel", "query": "白毒伞 蘑菇"},
    {"name": "False Morel", "query": "假羊肚菌 蘑菇"},
    {"name": "Fly Agaric", "query": "红菇 白点 蘑菇"}
]

# Download one validated image per mushroom.
for mushroom in mushrooms:
    try:
        image_url = search_baidu_images(mushroom["query"], mushroom["name"])
        if image_url:
            filename = f"images/{mushroom['name']}.png"
            # The search pipeline writes to a temp file; move it into place.
            if os.path.exists("images/temp.png"):
                os.rename("images/temp.png", filename)
                # BUGFIX: message printed a literal "(unknown)" instead of
                # interpolating the saved filename.
                print(f"成功保存: {filename}")
        else:
            print(f"未找到图片: {mushroom['name']}")
    except Exception as e:
        print(f"处理失败 {mushroom['name']}: {str(e)}")