import imghdr
import itertools
import os
import time
from io import BytesIO
from urllib.parse import parse_qs, quote, urlparse, urlunparse

import requests
from bs4 import BeautifulSoup
from PIL import Image
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from serpapi import GoogleSearch
from sklearn.feature_extraction.text import TfidfVectorizer

# Module-level running counter used to number saved image files across calls
# to extract_image(); incremented once per successfully downloaded image.
index = 1


def search_images_yandex(query):
    """Open Yandex image search with Selenium and return the parsed results page.

    Args:
        query: Search keywords; URL-encoded before being placed in the URL.

    Returns:
        A tuple ``(soup, chrome_options)`` where ``soup`` is the BeautifulSoup
        of the fully loaded results page, or ``None`` if the expected image
        elements never appeared within the wait timeout.
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument("--headless")  # run without a visible window
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')

    driver = webdriver.Chrome(options=chrome_options)
    try:
        # URL-encode the query so spaces and special characters survive the GET.
        url = f"https://yandex.eu/images/search?text={quote(query)}&type=photo"
        driver.get(url)

        # Scroll to the bottom repeatedly so lazily loaded thumbnails appear.
        for _ in range(10):
            driver.find_element(By.TAG_NAME, 'body').send_keys(Keys.END)
            time.sleep(2)  # give the page time to load new content

        # Explicitly wait until at least one image element is present.
        try:
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, 'ContentImage-Image_clickable'))
            )
        except Exception as e:
            print(f"页面加载失败: {e}")
            return None

        # Short grace period so the DOM settles before grabbing the source.
        time.sleep(3)
        page_source = driver.page_source
    finally:
        driver.quit()  # always release the browser, even on unexpected errors

    soup = BeautifulSoup(page_source, 'html.parser')
    return soup, chrome_options


def download_image(image_url, save_path):
    """Download the image at *image_url*, validate it, and save it to *save_path*.

    Args:
        image_url: Direct URL of the image to fetch.
        save_path: Filesystem path where the verified image is written.

    Returns:
        True if the file was downloaded and verified as a real image, else False.
    """
    try:
        # Session with retries for transient server-side errors.
        session = requests.Session()
        retries = Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
        session.mount('https://', HTTPAdapter(max_retries=retries))
        # BUG FIX: use the session (so the retry adapter actually applies)
        # instead of the bare module-level requests.get.
        # NOTE(review): verify=False disables TLS certificate checking — a
        # security trade-off; confirm it is intentional for these image hosts.
        response = session.get(image_url, stream=True, timeout=10, verify=False)

        # Reject responses that are not images (or carry no Content-Type at all —
        # the original crashed with TypeError on a missing header).
        content_type = response.headers.get('Content-Type') or ''
        if 'image' not in content_type:
            print(f"跳过非图片链接: {image_url}")
            return False

        # Let PIL parse the bytes to confirm the payload is a decodable image.
        img_data = BytesIO(response.content)
        img = Image.open(img_data)
        img.save(save_path)

        # Double-check the saved file is a recognized image format.
        if imghdr.what(save_path) is None:
            print(f"下载图片格式不正确: {image_url}")
            os.remove(save_path)
            return False

        print(f"图片已保存：{save_path}")
        return True

    except Exception as e:
        print(f"下载链接响应出错 {image_url}. 错误信息: {e}")
        return False


def extract_image(soup, save_directory, chrome_options, max_images, step):
    """Visit each result link on the search page and download the full-size image.

    Args:
        soup: BeautifulSoup of the Yandex results page.
        save_directory: Directory where images are written.
        chrome_options: ChromeOptions reused for the detail-page browser.
        max_images: Maximum number of images to download in this call.
        step: Search-stage number embedded in the saved filenames.
    """
    image_num = 0

    # Anchor tags on the results page that link to per-image detail pages.
    image_links = soup.find_all('a', class_='Link ContentImage-Cover')

    # One browser instance is reused for all detail pages.
    driver = webdriver.Chrome(options=chrome_options)
    try:
        for link in image_links:
            # Stop before loading another page once the quota is met (the
            # original only checked after the page had already been fetched).
            if image_num >= max_images:
                break

            image_page_url = link.get('href')
            if not image_page_url:
                continue
            full_url = f"https://yandex.eu{image_page_url}"
            driver.get(full_url)

            try:
                # Wait until the "open image" button renders on the detail page.
                WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located((By.CLASS_NAME, 'Button2_link'))
                )

                image_soup = BeautifulSoup(driver.page_source, 'html.parser')
                final_image_link = image_soup.find('a',
                                                   class_='Button2 Button2_pin_circle-brick Button2_size_xl Button2_link Button2_view_default OpenImageButton-Save MMViewerButtons-OpenImage MMViewerButtons-Button')
                if not final_image_link:
                    continue
                img_url = final_image_link.get('href')
                if not img_url:
                    continue

                print(f"图片链接: {img_url}")
                # Strip query parameters so the extension can be read from the path.
                parsed_url = urlparse(img_url)
                img_url = urlunparse(parsed_url._replace(query=''))
                file_extension = os.path.splitext(img_url)[1] or '.png'

                global index  # module-level counter shared across calls
                save_path = os.path.join(save_directory, f'image_{step}_{index}{file_extension}')
                if download_image(img_url, save_path):
                    image_num += 1
                    index += 1
            except Exception as e:
                print(f"错误的链接：{full_url}: {e}")
    finally:
        driver.quit()  # release the browser even if a detail page errors out


# Multi-stage search logic
def multi_stage_search(query, save_directory, max_images, step):
    """Run one search stage: fetch Yandex results for *query* and download images.

    Args:
        query: Search keywords.
        save_directory: Output directory, created if it does not exist.
        max_images: Cap on images downloaded during this stage.
        step: Stage number embedded in the saved filenames.
    """
    # Create the output directory if needed (idempotent).
    os.makedirs(save_directory, exist_ok=True)

    print(f"检索阶段{step}: 使用关键词 '{query}' 搜索")
    result = search_images_yandex(query)  # Selenium-driven Yandex image search
    # BUG FIX: search_images_yandex returns None when the page fails to load;
    # the original unconditionally unpacked a 2-tuple and crashed with TypeError.
    if result is None:
        print(f"检索阶段{step}: 未获取到搜索结果")
        return
    soup, chrome_options = result
    extract_image(soup, save_directory, chrome_options, max_images, step)


def search_image(query, save_directory, max_images=10, step=1):
    """Search Yandex Images through SerpAPI and download up to *max_images* hits.

    Args:
        query: Search keywords.
        save_directory: Directory where images are saved (created if missing).
        max_images: Maximum number of results to download. Defaults to 10.
        step: Stage number embedded in the saved filenames. Defaults to 1.
    """
    # SECURITY(review): the API key is hard-coded in source — move it to an
    # environment variable or config file before publishing this code.
    params = {
        "engine": "yandex_images",
        "text": query,
        "image_type": "photo",
        "api_key": "ac6f7dbdf12c1e1cb9a038ed4a3661cdc8fb87441160eb712b5f98001532f3db"
    }

    search = GoogleSearch(params)
    results = search.get_dict()

    os.makedirs(save_directory, exist_ok=True)

    # Guard against fewer results than requested (the original indexed blindly
    # and raised IndexError on short result lists).
    for i, item in enumerate(results.get("images_results", [])[:max_images]):
        image_url = item["original"]
        # BUG FIX: the original condition was inverted — it assigned ".jpg"
        # when "jpg" was ABSENT from the URL and ".png" when it was present.
        file_extension = ".jpg" if "jpg" in image_url else ".png"
        save_path = os.path.join(save_directory, f"image_{step}_{i}{file_extension}")
        download_image(image_url, save_path)


if __name__ == '__main__':
    # Demo run: fetch 10 "coffee" images via the SerpAPI-backed search path.
    search_image("coffee", "./yandex_images", 10)
