# -*- coding:utf8 -*-
import requests
import json
from urllib import parse
import os
import time
import threading
from retrying import retry  # 需安装：pip install retrying


# 检查网络连接的函数
def check_network():
    try:
        requests.get('https://www.baidu.com', timeout=5)
        return True
    except requests.RequestException:
        return False


# GET with unlimited reconnect attempts; blocks until a request succeeds.
def request_with_reconnect(url, headers, timeout=10):
    """Fetch `url` with GET, retrying forever on failure.

    Each cycle first probes connectivity via check_network(); when online,
    performs the request and returns the Response on HTTP success
    (raise_for_status). Both outages and request errors count as a retry,
    with a fixed 5-second pause between attempts. Never returns None.
    """
    attempt = 0
    while True:
        if not check_network():
            attempt += 1
            print(f"网络连接中断, 尝试第 {attempt} 次重连...")
        else:
            try:
                resp = requests.get(url, headers=headers, timeout=timeout)
                resp.raise_for_status()
            except requests.RequestException as exc:
                attempt += 1
                print(f"请求出错: {str(exc)}, 尝试第 {attempt} 次重连...")
            else:
                return resp
        time.sleep(5)  # back off before the next attempt


class BaiduImageSpider(object):
    """Multi-threaded Baidu image-search crawler.

    For each category it walks a list of search keywords, pages through the
    `acjson` search endpoint, and saves thumbnail images into a per-category
    folder under `base_directory`. Instances are shared across threads (one
    thread per category); `self.lock` only serializes console output.
    """

    def __init__(self):
        # Paged JSON search endpoint; .format() slots: queryWord, word, pn (result offset).
        self.url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&logid=5179920884740494226&ipn=rj&ct=201326592&is=&fp=result&queryWord={}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&word={}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&nojc=&pn={}&rn=30&gsm=1e&1635054081427='
        self.base_directory = r"D:\梦工厂第一组"  # root folder holding one sub-folder per category
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36 Edg/95.0.1020.30'
        }
        self.lock = threading.Lock()  # keeps multi-thread prints from interleaving

    def create_directory(self, category):
        """Create (if needed) and return the storage folder for `category`."""
        dir_path = os.path.join(self.base_directory, category)
        # exist_ok=True: the original exists()/makedirs() pair raced when two
        # threads created the same folder simultaneously.
        os.makedirs(dir_path, exist_ok=True)
        return dir_path

    @retry(stop_max_attempt_number=3, wait_fixed=2000)  # retry up to 3 times, 2s apart
    def get_image_link(self, url):
        """Return the list of 'thumbURL' links from one search-result page.

        Raises on JSON parse errors so the @retry decorator re-fetches the
        page; returns [] when the response carries no usable data.
        """
        response = request_with_reconnect(url, self.header)
        if response:
            try:
                json_data = json.loads(response.text)
                return [item['thumbURL'] for item in json_data.get('data', []) if 'thumbURL' in item]
            except Exception as e:
                print(f"获取链接失败: {str(e)}")
                raise  # propagate so @retry re-runs the fetch
        return []

    @retry(stop_max_attempt_number=3, wait_fixed=2000)  # retry up to 3 times
    def download_image(self, img_url, save_path):
        """Download one image to `save_path`; return True on success.

        Write failures are re-raised so @retry can attempt again.
        """
        response = request_with_reconnect(img_url, self.header)
        if response:
            try:
                with open(save_path, "wb") as f:
                    f.write(response.content)
                with self.lock:  # thread-safe print
                    print(f"下载成功: {save_path}")
                return True
            except Exception as e:
                print(f"下载失败 {img_url}: {str(e)}")
                raise
        return False

    def download_category(self, category, keywords, max_images=4000):
        """Download up to `max_images` images for one category.

        Iterates the category's keywords, paging each search until it runs
        out of results, fails repeatedly, or the image quota is reached.
        URLs are de-duplicated across keywords and pages.
        """
        dir_path = self.create_directory(category)
        downloaded = 0
        downloaded_urls = set()  # URLs already saved; prevents duplicate downloads

        for keyword in keywords:
            search_term = parse.quote(keyword)
            page = 0
            consecutive_failures = 0  # pages in a row that raised for this keyword
            while downloaded < max_images:
                # Bug fix: the original computed pn = page * 30 AFTER the
                # increment, so offset 0 (the first 30 results) was skipped.
                pn = page * 30
                page += 1
                url = self.url.format(search_term, search_term, pn)

                try:
                    links = self.get_image_link(url)
                    if not links:
                        print(f"{keyword} 第{page}页无结果")
                        break

                    consecutive_failures = 0
                    for link in links:
                        if downloaded >= max_images:
                            break
                        if link in downloaded_urls:
                            continue
                        filename = f"{category}_{downloaded + 1}.jpg"
                        save_path = os.path.join(dir_path, filename)
                        if self.download_image(link, save_path):
                            downloaded += 1
                            downloaded_urls.add(link)
                            time.sleep(0.2)  # throttle request rate
                except Exception as e:
                    print(f"{keyword} 第{page}页处理失败: {str(e)}")
                    consecutive_failures += 1
                    # Bug fix: the original `continue` could loop forever when
                    # every page kept failing; give up on this keyword after
                    # 3 consecutive bad pages.
                    if consecutive_failures >= 3:
                        break
                    continue

        print(f"{category} 下载完成，共下载{downloaded}张图片")


def main():
    """Launch one crawler thread per image category and wait for all to finish."""
    spider = BaiduImageSpider()

    # Mapping: category name -> search keywords feeding that category's folder.
    category_keywords = {
        '人': ['人', '男人', '女人', '小孩', '人群', '老人', '年轻人', '上班族', '学生'],
        '消防栓': ['消防栓', '室内消防栓', '室外消防栓', '红色消防栓', '家用消防栓', '工业消防栓'],
        '红绿灯': ['红绿灯', '交通红绿灯', '十字路口红绿灯', '圆形红绿灯', '箭头红绿灯', '智能红绿灯'],
        '斑马线': ['斑马线', '人行横道斑马线', '彩色斑马线', '学校门口斑马线', '路口斑马线'],
        '货车': ['货车', '重型货车', '厢式货车', '货车运输', '货车装卸', '冷藏货车', '平板货车'],
        '小轿车': ['小轿车', '轿车', '跑车', '家用轿车', '豪华轿车', '新能源轿车', '紧凑型轿车'],
        '火车': ['火车', '动车', '高铁', '火车轨道', '火车头', '货运火车', '客运火车'],
        '自行车': ['自行车', '山地自行车', '公路自行车', '自行车骑行', '自行车比赛', '折叠自行车', '电动自行车'],
        '飞机': ['飞机', '客机', '战斗机', '直升机', '飞机起飞', '飞机降落', '私人飞机', '军用飞机'],
        '船': ['船', '轮船', '帆船', '游艇', '货船', '客船', '渔船', '军舰']
    }

    workers = []
    for category, keywords in category_keywords.items():
        # Guard against empty category names.
        if not category:
            continue
        worker = threading.Thread(
            target=spider.download_category,
            args=(category, keywords),
            kwargs={'max_images': 4000},
        )
        workers.append(worker)
        worker.start()

    # Block until every category thread has finished.
    for worker in workers:
        worker.join()


if __name__ == '__main__':
    main()