#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Selenium 百度图片搜索爬虫
使用真实的浏览器自动化获取壁纸图片
"""
import hashlib
import json
import logging
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any
from urllib.parse import unquote, urlencode, urlparse

import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tqdm import tqdm

# 设置日志
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


class SeleniumBaiduCrawler:
    def __init__(
        self,
            user_data_dir: str,
            executable_path: str,
        binary_location: str,
        cookie: str,
        keyword="壁纸",
        max_pages=20,
        base_dir="selenium_baidu_wallpapers",
    ):
        self.user_data_dir = user_data_dir
        self.binary_location = binary_location
        self.executable_path = executable_path
        self.keyword = keyword
        self.max_pages = max_pages
        self.base_dir = base_dir

        # 设置请求头
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
            "Referer": "https://image.baidu.com/",
            "Accept": "image/webp,image/apng,image/*,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cookie": cookie,
        }

        # 设置目录
        self.setup_directories()

        # 初始化浏览器
        self.driver = None
        self.wait = None

        # 线程锁，用于线程安全的文件操作
        # self.file_lock = threading.Lock()

    def setup_directories(self):
        """设置目录结构"""
        dirs = [
            self.base_dir,
            os.path.join(self.base_dir, "images"),
            os.path.join(self.base_dir, "data"),
            os.path.join(self.base_dir, "logs"),
        ]

        for dir_path in dirs:
            os.makedirs(dir_path, exist_ok=True)

    def init_driver(self):
        """初始化Chrome浏览器"""
        if self.driver:
            return True
        print("🚀 正在初始化Chrome浏览器...")
        try:
            ser = Service(executable_path=self.executable_path)
            ser.executable_path = self.executable_path
            options = Options()
            options.binary_location = self.binary_location
            user_data_dir = self.user_data_dir

            options.add_argument(f'--user-data-dir={user_data_dir}')
            options.add_argument("--headless")  # 无头模式
            options.add_argument("--no-sandbox")
            options.add_argument("--disable-dev-shm-usage")
            options.add_argument("--disable-gpu")
            options.add_argument("--window-size=1920,1080")
            options.add_argument("--disable-blink-features=AutomationControlled")
            options.add_argument('--disable-dev-shm-usage')  # fix:DevToolsActivePort file doesn't exist
            options.add_argument('--remote-debugging-port=9222')  # fix:DevToolsActivePort file doesn't
            options.add_experimental_option(
                "excludeSwitches", ["enable-automation"]
            )
            options.add_experimental_option("useAutomationExtension", False)

            self.driver = webdriver.Chrome(options=options, service=ser)
            print(self.driver)
            self.wait = WebDriverWait(self.driver, 10)


            # 设置cookie
            # search_url = r"https://image.baidu.com/search/index?tn=baiduimage&fm=result&ie=utf-8&word=%E5%A3%81%E7%BA%B8"
            # logger.info(f"🔗 访问搜索页面: {search_url}")
            # self.driver.get(
            #     search_url
            # )
            time.sleep(2)

            logger.info("✅ Chrome浏览器初始化成功")
            # self.driver.close()
            return True

        except Exception as e:
            logger.error(f"❌ 浏览器初始化失败: {str(e)}")
            if self.driver:
                print("关闭浏览器")
                self.driver.quit()
            return False

    def get_search_url(self):
        """获取搜索URL"""
        base_url = "https://image.baidu.com/search/index"
        params = {
            "tn": "baiduimage",
            "ipn": "r",
            "ct": "201326592",
            "cl": "2",
            "lm": "-1",
            "st": "-1",
            "fr": "",
            "sf": "1",
            "fmq": "1526269427171_R",
            "pv": "",
            "ic": "0",
            "nc": "1",
            "z": "",
            "se": "1",
            "showtab": "0",
            "fb": "0",
            "width": "",
            "height": "",
            "face": "0",
            "istype": "2",
            "ie": "utf-8",
            "word": self.keyword,
        }

        url = f"{base_url}?{'&'.join([f'{k}={v}' for k, v in params.items()])}"
        return url

    def scroll_to_load_images(self, times=5):
        """滚动加载更多图片"""
        logger.info(f"🔄 开始滚动加载图片，滚动次数: {times}")

        for i in range(times):
            # 滚动到页面底部
            self.driver.execute_script(
                "window.scrollTo(0, document.body.scrollHeight);"
            )
            time.sleep(2)  # 等待加载

            # 滚动回顶部一点，确保所有图片都加载
            self.driver.execute_script(
                "window.scrollTo(0, document.body.scrollHeight - 1000);"
            )
            time.sleep(1)

            logger.info(f"📊 第 {i+1} 次滚动完成")

    def extract_image_data(self):
        """提取图片数据"""
        logger.info("🔍 开始提取图片数据...")

        try:
            print("开始等待图片加载...")
            # 等待图片加载
            self.wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, "#waterfall"))
            )

            print("图片加载完成")
            # 使用JavaScript获取所有图片信息
            script = """
            var images = [];
            var imgElements = document.querySelectorAll('img');
            //var imgElements = document.querySelectorAll('img.main_img, .imgitem img, .img-wrap img');
            
            for(var i = 0; i < imgElements.length; i++) {
                var img = imgElements[i];
                var parent = img.parentElement;
                
                // 获取原图URL
                var originalUrl = img.getAttribute('data-objurl') || img.src || '';
                
                // 获取缩略图URL
                var thumbUrl = img.src || '';
                
                // 获取下载链接
                var downloadLink = '';
                var downloadBtn = parent.querySelector('a[title="下载原图"], .download-btn, [download]');
                if(downloadBtn) {
                    downloadLink = downloadBtn.href || '';
                }
                
                // 获取标题
                var title = img.alt || img.title || '壁纸_' + i;
                
                images.push({
                    'originalUrl': originalUrl,
                    'thumbUrl': thumbUrl,
                    'downloadUrl': downloadLink,
                    'title': title,
                    'index': i
                });
            }
            
            return images;
            """

            images = self.driver.execute_script(script)
            logger.info(f"📊 成功提取 {len(images)} 张图片数据")

            return images

        except Exception as e:
            logger.error(f"❌ 提取图片数据失败: {str(e)}")
            return []

    def download_image(self, image_info, save_dir, index):
        """下载单张图片"""
        url = image_info.get("originalUrl", "") or image_info.get("thumbUrl", "")
        if not url:
            return {"success": False, "error": "No URL found"}

        try:
            # 生成文件名
            filename = self.get_filename_from_url(url, index)
            filepath = os.path.join(save_dir, filename)

            # 跳过已下载的文件
            if os.path.exists(filepath):
                return {"success": True, "skipped": True, "filepath": filepath}

            # 下载图片
            response = requests.get(url, headers=self.headers, timeout=30, stream=True)
            response.raise_for_status()

            # with self.file_lock:
            with open(filepath, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)

            return {
                "success": True,
                "filepath": filepath,
                "size": os.path.getsize(filepath),
                "url": url,
                "title": image_info.get("title", filename),
            }

        except Exception as e:
            return {"success": False, "error": str(e), "url": url}

    def get_filename_from_url(self, url, index):
        """从URL生成文件名"""
        try:
            parsed = urlparse(url)
            filename = os.path.basename(unquote(parsed.path))

            # 清理文件名
            filename = "".join(
                c for c in filename if c.isalnum() or c in (" ", "-", "_", ".")
            )

            if not filename or "." not in filename:
                ext = ".jpg"
                if ".png" in url.lower():
                    ext = ".png"
                elif ".jpeg" in url.lower():
                    ext = ".jpeg"
                elif ".webp" in url.lower():
                    ext = ".webp"

                url_hash = hashlib.md5(url.encode()).hexdigest()[:8]
                filename = f"baidu_wallpaper_{index:04d}_{url_hash}{ext}"

            return filename + ".jpg" if not filename.endswith(".jpg") else filename
        except Exception as e:
            logger.error(f"❌ 生成文件名失败: {str(e)}")
            url_hash = hashlib.md5(url.encode()).hexdigest()[:8]
            return f"baidu_wallpaper_{index:04d}_{url_hash}.jpg"

    def save_page_data(self, page_num, images):
        """保存每页的图片数据"""
        page_dir = os.path.join(self.base_dir, "data", f"page_{page_num:02d}")
        os.makedirs(page_dir, exist_ok=True)

        # 保存JSON数据
        json_path = os.path.join(page_dir, "images.json")
        with open(json_path, "w", encoding="utf-8") as f:
            json.dump(images, f, ensure_ascii=False, indent=2)

        # 保存文本列表
        txt_path = os.path.join(page_dir, "urls.txt")
        with open(txt_path, "w", encoding="utf-8") as f:
            for img in images:
                f.write(f"标题: {img.get('title', '无标题')}\n")
                f.write(f"原图URL: {img.get('originalUrl', '无')}\n")
                f.write(f"缩略图URL: {img.get('thumbUrl', '无')}\n")
                f.write(f"下载URL: {img.get('downloadUrl', '无')}\n")
                f.write("-" * 50 + "\n")

        return page_dir

    def crawl_page(self, page_num):
        """爬取单页"""
        logger.info(f"🚀 开始爬取第 {page_num} 页...")

        # 滚动加载图片
        self.scroll_to_load_images(times=3)

        # 提取图片数据
        images = self.extract_image_data()

        if not images:
            logger.warning(f"⚠️ 第 {page_num} 页未找到图片")
            return None

        # 保存数据
        page_dir = self.save_page_data(page_num, images)

        # 下载图片（使用线程池并发下载）
        images_dir = os.path.join(page_dir, "images")
        os.makedirs(images_dir, exist_ok=True)

        download_results = []
        max_workers = 10  # 同时下载10个文件

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # 提交所有下载任务
            future_to_image = {
                executor.submit(self.download_image, img, images_dir, i): (i, img)
                for i, img in enumerate(images)
            }

            # 收集结果
            for future in tqdm(
                as_completed(future_to_image),
                total=len(images),
                desc=f"下载第{page_num}页",
            ):
                i, img = future_to_image[future]
                try:
                    result = future.result()
                    result["page"] = page_num
                    download_results.append(result)
                except Exception as e:
                    result = {
                        "success": False,
                        "error": str(e),
                        "url": img.get("originalUrl", "") or img.get("thumbUrl", ""),
                        "page": page_num,
                    }
                    download_results.append(result)

        # 保存下载结果
        results_path = os.path.join(page_dir, "download_results.json")
        with open(results_path, "w", encoding="utf-8") as f:
            json.dump(download_results, f, ensure_ascii=False, indent=2)

        return {
            "page": page_num,
            "images_count": len(images),
            "download_dir": page_dir,
            "download_results": download_results,
        }

    def start_crawling(self) -> dict[str, Any]:
        """开始爬取"""
        logger.info("🎯 开始百度壁纸爬取任务...")
        print("当前下载工作目录:", os.path.abspath(self.base_dir))
        # self.init_driver()

        if not self.init_driver():
            return None

        try:
            # 访问搜索页面
            search_url = self.get_search_url()
            logger.info(f"🔗 访问搜索页面: {search_url}")
            self.driver.get(search_url)
            time.sleep(3)

            all_results = []

            # 模拟翻页（通过滚动加载更多内容）
            for page in range(1, self.max_pages + 1):

                # 模拟下一页（继续滚动）
                if page < self.max_pages:
                    print(f"开始滚动加载第{page + 1}页...", )
                    self.scroll_to_load_images(times=2)
                    time.sleep(2)
            result = self.crawl_page(page)
            if result:
                all_results.append(result)
                logger.info(
                    f"✅ 第 {page} 页爬取完成，共 {result['images_count']} 张图片"
                )

            # 生成总报告
            total_report = {
                "total_pages": self.max_pages ,
                "total_images": sum(r["images_count"] for r in all_results),
                "results": all_results,
                "base_dir": os.path.abspath(self.base_dir),
            }

            report_path = os.path.join(self.base_dir, "data", "crawl_report.json")
            with open(report_path, "w", encoding="utf-8") as f:
                json.dump(total_report, f, ensure_ascii=False, indent=2)

            logger.info("🎉 爬取任务完成！")
            logger.info(f"📊 总报告: {report_path}")

            return total_report

        except Exception as e:
            logger.error(f"❌ 爬取过程中出现错误: {str(e)}")
            return None
        finally:
            if self.driver:
                print("关闭浏览器")
                self.driver.quit()
                logger.info("🚪 浏览器已关闭")


def main():
    """Entry point: build a crawler with local, machine-specific settings and run it."""
    # NOTE(review): the BDUSS cookie below is a login credential committed in
    # source — move it to an environment variable or config file, and rotate it.
    # NOTE(review): executable_path / binary_location / user_data_dir are
    # hard-coded Windows paths; parameterize before reusing elsewhere.
    crawler = SeleniumBaiduCrawler(
        keyword="壁纸",
        max_pages=10,
        binary_location=r"F:\Chrome-bin\chrome.exe",
        base_dir="selenium_baidu_wallpapers",
        cookie="BDUSS=WIzcWZyYWNjflpNWDktSH5WbVE3WHdWdjVMYmRufkZwNWFGaUt1YXJjM2VSVHRuRVFBQUFBJCQAAAAAAAAAAAEAAADPRYIEbWFzeDIwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN64E2feuBNnS; BDUSS_BFESS=WIzcWZyYWNjflpNWDktSH5WbVE3WHdWdjVMYmRufkZwNWFGaUt1YXJjM2VSVHRuRVFBQUFBJCQAAAAAAAAAAAEAAADPRYIEbWFzeDIwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN64E2feuBNnS; BIDUPSID=521C5EBD29C266C534D8FABD3B40E550; PSTM=1730035593; MCITY=-289%3A; H_PS_PSSID=60271_62325_63147_63881_63948_64009_64051_64057_64056_64092_64094_64146_64159_64174_64182_64248_64245_64253_64258_64260_64270_64312; BAIDUID=E7D4BEF48E664BAFB7EDFEBFF557C0A2:SL=1:NR=10:FG=1; BA_HECTOR=ag2ha425a5ag802k01a1800h0h8h031k8gdrj24; BAIDUID_BFESS=E7D4BEF48E664BAFB7EDFEBFF557C0A2:SL=1:NR=10:FG=1; ZFY=9qMy9hHFGzBsC:A3I7avg7AwkSU7HWrC5lixfYyz09:A0:C; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_WISE_SIDS=62325_63948_64092_64146_64159_64253_64270_64312; arialoadData=false; ab_sr=1.0.1_ZTI0MDU4MzA0ZWI1NzMyMjVhZWI4OTFmNTVjMjEyODM0Y2MzNGU0YmFkNjJiYzI4YTMxMWU2MTE1N2IyYTllMTQ4ZTA3MjIwMjU5MDU2ZGYxOGE3NjlhNjAxZmIzYTE2MzI4MDVjMDIxNjc2ODFkNWJhNGEwNWQ1MWJhMDE5NzYwNTY2OWRhMDAzODIxNzhjMDJmOTJjMmJlNjg5ZTU4ZQ=="
        , executable_path=r"F:\迅雷下载\chromedriver-win64\chromedriver.exe",
        user_data_dir=r"f:\Google\Chrome\User Data"
    )

    # Run the crawl; returns the summary report dict, or None on failure.
    result = crawler.start_crawling()

    if result:
        print("\n" + "=" * 60)
        print("🎉 爬取任务完成！")
        print("=" * 60)
        print(f"📊 爬取页数: {result['total_pages']}")
        print(f"📈 总图片数: {result['total_images']}")
        print(f"📁 保存目录: {result['base_dir']}")
        print("=" * 60)
    else:
        print("❌ 爬取任务失败")


if __name__ == "__main__":
    main()
