# -*- coding:utf8 -*-
import requests
import json
from urllib import parse
import os
import time
import threading
from retrying import retry  # 需安装：pip install retrying

class BaiduImageSpider(object):
    """Multi-threaded downloader for Baidu image-search thumbnail results.

    One thread per category calls :meth:`download_category`, which pages
    through Baidu's image-search JSON API and saves every thumbnail under
    ``base_directory/<category>/``.
    """

    def __init__(self):
        # Baidu image-search JSON endpoint; the three {} placeholders are
        # queryWord, word, and pn (result offset — 30 results per page).
        self.url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&logid=5179920884740494226&ipn=rj&ct=201326592&is=&fp=result&queryWord={}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&word={}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&nojc=&pn={}&rn=30&gsm=1e&1635054081427='
        self.base_directory = r"D:\梦工厂第一组"  # root folder for all downloads
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36 Edg/95.0.1020.30'
        }
        self.lock = threading.Lock()  # serializes progress printing across threads

    def create_directory(self, category):
        """Create (if missing) and return the download directory for *category*."""
        dir_path = os.path.join(self.base_directory, category)
        # exist_ok=True avoids the check-then-create (TOCTOU) race of the
        # original os.path.exists() guard when threads run concurrently.
        os.makedirs(dir_path, exist_ok=True)
        return dir_path

    @retry(stop_max_attempt_number=3, wait_fixed=2000)  # up to 3 attempts, 2 s apart
    def get_image_link(self, url):
        """Return the list of thumbnail URLs from one search-result page.

        Re-raises on network/JSON errors so the @retry decorator can re-attempt.
        """
        try:
            response = requests.get(url, headers=self.header, timeout=10)
            response.raise_for_status()
            json_data = json.loads(response.text)
            return [item['thumbURL'] for item in json_data.get('data', []) if 'thumbURL' in item]
        except Exception as e:
            print(f"获取链接失败: {str(e)}")
            raise

    # BUG FIX: was stop_max_attempt_number=None (retry forever), which hangs
    # the whole category thread on a single permanently dead URL.  Bound it
    # like get_image_link so a bad link fails fast and the caller skips it.
    @retry(stop_max_attempt_number=3, wait_fixed=2000)
    def download_image(self, img_url, save_path):
        """Download one image to *save_path*; return True on success, raise on failure."""
        try:
            response = requests.get(img_url, headers=self.header, timeout=10)
            response.raise_for_status()
            with open(save_path, "wb") as f:
                f.write(response.content)
            with self.lock:  # thread-safe printing
                print(f"下载成功: {save_path}")
            return True
        except Exception as e:
            with self.lock:  # guard the failure message too, for consistent output
                print(f"下载失败 {img_url}: {str(e)}")
            raise

    def download_category(self, category, max_images=2000):
        """Download up to *max_images* thumbnails for one search category."""
        dir_path = self.create_directory(category)
        search_term = parse.quote(category)

        downloaded = 0
        page = 0      # 1-based page counter, used in progress messages
        failures = 0  # consecutive failed pages; aborts a dead query
        while downloaded < max_images:
            page += 1
            # BUG FIX: Baidu's pn offset starts at 0; the original computed
            # page * 30 and silently skipped the first 30 results.
            pn = (page - 1) * 30
            url = self.url.format(search_term, search_term, pn)

            try:
                links = self.get_image_link(url)
            except Exception as e:
                print(f"{category} 第{page}页处理失败: {str(e)}")
                failures += 1
                if failures >= 3:  # don't loop forever on a dead endpoint
                    break
                continue

            failures = 0
            if not links:
                print(f"{category} 第{page}页无结果")
                break

            for link in links:
                if downloaded >= max_images:
                    break
                filename = f"{category}_{downloaded + 1}.jpg"
                save_path = os.path.join(dir_path, filename)
                try:
                    if self.download_image(link, save_path):
                        downloaded += 1
                        time.sleep(0.2)  # throttle request rate
                except Exception:
                    # A single dead link must not abort the rest of the page
                    # (originally the exception escaped to the page-level
                    # handler and skipped every remaining link).
                    continue

        print(f"{category} 下载完成，共下载{downloaded}张图片")

def main():
    """Read category names from stdin and download each in its own thread."""
    spider = BaiduImageSpider()

    input_str = input("请输入要下载的分类（用英文逗号分隔，例如: 火车,消防栓）: ").strip()
    # Be forgiving about the input method: accept the full-width Chinese
    # comma '，' as well as the ASCII ',' the prompt asks for.
    normalized = input_str.replace('，', ',')
    # Deduplicate while preserving order — duplicate categories would spawn
    # two threads racing to write the same files.
    categories = list(dict.fromkeys(
        cat.strip() for cat in normalized.split(',') if cat.strip()
    ))

    threads = []
    for category in categories:
        thread = threading.Thread(
            target=spider.download_category,
            args=(category,),          # one category per worker thread
            kwargs={'max_images': 2000},
        )
        threads.append(thread)
        thread.start()

    # Block until every category has finished downloading.
    for thread in threads:
        thread.join()

# Run the spider only when executed as a script, not when imported.
if __name__ == '__main__':
    main()