import requests
import json
import os
import time
import re
from urllib.parse import quote, unquote
import random


class BaiduImageDownloader:
    """Download images from Baidu image search for a given keyword.

    Two strategies are provided:
      * method 1 — query the JSON search endpoint (``/search/acjson``),
        which returns thumbnail/medium image URLs; relatively stable.
      * method 2 — scrape the legacy "flip" HTML result page and pull
        ``objURL``/``hoverURL`` links out with a regex.

    Both methods save files under ``<save_dir>/<keyword>/`` and return the
    number of images successfully written to disk.
    """

    def __init__(self):
        # One shared Session so browser-like headers (and any cookies Baidu
        # sets) persist across every request we make.
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive',
            'Referer': 'https://image.baidu.com/'
        })

    def _ensure_keyword_dir(self, keyword, save_dir):
        """Create (if needed) and return the per-keyword save directory.

        ``os.makedirs(..., exist_ok=True)`` also creates ``save_dir`` itself
        when missing, replacing the racy exists-then-create pattern.
        """
        keyword_dir = os.path.join(save_dir, keyword)
        os.makedirs(keyword_dir, exist_ok=True)
        return keyword_dir

    def _save_image(self, img_url, file_path):
        """Download one image URL to ``file_path``.

        Returns True on success. A 200 response with an empty body is
        treated as a failure so we never write zero-byte files.
        May raise (network errors, timeouts); callers handle per-image.
        """
        img_response = self.session.get(img_url, timeout=15)
        if img_response.status_code == 200 and img_response.content:
            with open(file_path, 'wb') as f:
                f.write(img_response.content)
            return True
        return False

    def download_images_method1(self, keyword, page_count=5, save_dir="./baidu_images"):
        """Method 1: fetch image URLs through the Baidu JSON search API.

        Args:
            keyword: search term (also used as the subdirectory name).
            page_count: number of result pages to fetch (30 images/page).
            save_dir: root directory for downloads.

        Returns:
            Number of images successfully downloaded.
        """
        print(f"使用方法1下载关键词 '{keyword}' 的图片，共 {page_count} 页")

        keyword_dir = self._ensure_keyword_dir(keyword, save_dir)
        total_downloaded = 0

        for page in range(page_count):
            print(f"正在获取第 {page + 1} 页图片...")

            # Query parameters mimic the browser's XHR to /search/acjson.
            params = {
                'tn': 'resultjson_com',
                'ipn': 'rj',
                'ct': 201326592,
                'is': '',
                'fp': 'result',
                'queryWord': keyword,
                'cl': 2,
                'lm': -1,
                'ie': 'utf-8',
                'oe': 'utf-8',
                'adpicid': '',
                'st': -1,
                'z': '',
                'ic': 0,
                'word': keyword,
                's': '',
                'se': '',
                'tab': '',
                'width': '',
                'height': '',
                'face': 0,
                'istype': 2,
                'qc': '',
                'nc': 1,
                'fr': '',
                'pn': page * 30,  # offset of the first image on this page
                'rn': 30,  # images per page
                'gsm': '1e',
                '1488942260214': ''
            }

            try:
                url = 'https://image.baidu.com/search/acjson'
                response = self.session.get(url, params=params, timeout=10)
                response.encoding = 'utf-8'

                # Baidu sometimes returns malformed JSON (anti-crawler);
                # json() then raises and the page is skipped below.
                data = response.json()

                if 'data' not in data:
                    print(f"第 {page + 1} 页没有获取到数据")
                    continue

                for i, item in enumerate(data['data']):
                    # The last entry of each page is usually an empty dict.
                    if not item:
                        continue

                    # thumbURL is the thumbnail, middleURL a medium size.
                    img_url = item.get('thumbURL') or item.get('middleURL')
                    if not img_url:
                        continue

                    try:
                        file_name = f"{keyword}_{page + 1}_{i + 1}.jpg"
                        file_path = os.path.join(keyword_dir, file_name)

                        if self._save_image(img_url, file_path):
                            total_downloaded += 1
                            print(f"已下载: {file_name}")
                        else:
                            print(f"下载失败: {img_url}")

                        # Random delay to avoid hammering the server.
                        time.sleep(random.uniform(0.1, 0.5))

                    except Exception as e:
                        # Best-effort: one bad image must not stop the page.
                        print(f"下载图片时出错: {e}")
                        continue

            except Exception as e:
                print(f"获取第 {page + 1} 页时出错: {e}")
                continue

        print(f"方法1下载完成，共下载 {total_downloaded} 张图片")
        return total_downloaded

    def download_images_method2(self, keyword, page_count=5, save_dir="./baidu_images"):
        """Method 2: scrape image links from the legacy HTML result page.

        Args:
            keyword: search term (also used as the subdirectory name).
            page_count: number of result pages to fetch (20 images/page).
            save_dir: root directory for downloads.

        Returns:
            Number of images successfully downloaded.
        """
        print(f"使用方法2下载关键词 '{keyword}' 的图片，共 {page_count} 页")

        keyword_dir = self._ensure_keyword_dir(keyword, save_dir)
        total_downloaded = 0
        encoded_keyword = quote(keyword)

        for page in range(page_count):
            print(f"正在获取第 {page + 1} 页图片...")

            # The "flip" page paginates 20 results at a time via `pn`.
            pn = page * 20
            url = f'https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word={encoded_keyword}&pn={pn}'

            try:
                response = self.session.get(url, timeout=10)
                response.encoding = 'utf-8'
                html_content = response.text

                # objURL is the original-image link.
                # NOTE(review): Baidu may serve objURL obfuscated/encrypted;
                # if so these links will 404 — confirm against live output.
                pattern = r'"objURL":"(.*?)"'
                image_urls = re.findall(pattern, html_content)

                # Fall back to hoverURL when no objURL entries were found.
                if not image_urls:
                    pattern = r'"hoverURL":"(.*?)"'
                    image_urls = re.findall(pattern, html_content)

                if not image_urls:
                    print(f"第 {page + 1} 页没有找到图片链接")
                    continue

                print(f"第 {page + 1} 页找到 {len(image_urls)} 个图片链接")

                for i, img_url in enumerate(image_urls):
                    # Undo percent-encoding and JSON-escaped slashes.
                    img_url = unquote(img_url).replace('\\u002F', '/')

                    try:
                        file_name = f"{keyword}_p{page + 1}_{i + 1}.jpg"
                        file_path = os.path.join(keyword_dir, file_name)

                        if self._save_image(img_url, file_path):
                            total_downloaded += 1
                            print(f"已下载: {file_name}")
                        else:
                            print(f"下载失败: {img_url}")

                        # Random delay to avoid hammering the server.
                        time.sleep(random.uniform(0.1, 0.5))

                    except Exception as e:
                        # Best-effort: one bad image must not stop the page.
                        print(f"下载图片时出错: {e}")
                        continue

            except Exception as e:
                print(f"获取第 {page + 1} 页时出错: {e}")
                continue

        print(f"方法2下载完成，共下载 {total_downloaded} 张图片")
        return total_downloaded

    def download_images(self, keyword, page_count=5, save_dir="./baidu_images", method=1):
        """Dispatch to method 1 (API) or method 2 (HTML scraping).

        Any ``method`` value other than 1 selects method 2. Prints the
        elapsed time and returns the number of images downloaded.
        """
        start_time = time.time()

        if method == 1:
            result = self.download_images_method1(keyword, page_count, save_dir)
        else:
            result = self.download_images_method2(keyword, page_count, save_dir)

        end_time = time.time()
        elapsed_time = end_time - start_time
        print(f"总共耗时: {elapsed_time:.2f} 秒")

        return result


def main():
    """Entry point: run a download with the hard-coded configuration.

    The "choose a method" menu below is informational only — no user
    input is read, and method 2 (HTML scraping) is always used.
    """
    # Hard-coded search configuration.
    keyword = '河边施工'
    page_count = 5
    save_dir = './baidu_images'
    method = 2

    downloader = BaiduImageDownloader()

    print("=== 百度图片下载器 ===")
    for line in ("\n请选择下载方法:",
                 "1. 方法1 - 通过API接口 (推荐)",
                 "2. 方法2 - 解析HTML页面"):
        print(line)

    print("\n开始下载...")
    downloader.download_images(keyword, page_count, save_dir, method)


# Run the downloader only when executed as a script, not on import.
if __name__ == "__main__":
    main()