import os
import sys
import time
from urllib.parse import quote

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager

def create_output_dir():
    """Ensure the output directory exists and return its path.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of an
    exists-then-create pair, which is both shorter and free of the
    check/create race condition.

    Returns:
        str: Relative path of the output directory ('out').
    """
    out_dir = 'out'
    os.makedirs(out_dir, exist_ok=True)
    return out_dir

def get_image_urls(keyword):
    """Search Baidu Images for *keyword* and return thumbnail URLs.

    Launches a headless Chrome instance, loads the Baidu image-search
    results page, scrolls a few times to trigger lazy loading, then
    collects the URL of every visible thumbnail.

    Args:
        keyword: Search term; may contain non-ASCII (e.g. Chinese)
            characters, spaces, or URL-reserved characters.

    Returns:
        list[str]: http(s) image URLs; empty list on any error.
    """
    # Headless Chrome configuration (the sandbox/shm flags keep Chrome
    # working inside containers and CI environments).
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')

    driver = webdriver.Chrome(
        service=Service(ChromeDriverManager().install()),
        options=chrome_options
    )

    try:
        # Bug fix: percent-encode the keyword. Interpolating it raw
        # breaks the query string for non-ASCII terms, spaces, and
        # reserved characters such as '&' or '#'.
        url = f'https://image.baidu.com/search/index?tn=baiduimage&word={quote(keyword)}'
        driver.get(url)

        # Wait for the initial page render.
        time.sleep(3)

        # Scroll to the bottom a few times so more thumbnails lazy-load.
        for _ in range(3):
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(1)

        images = driver.find_elements(By.CSS_SELECTOR, '.main_img.img-hover')

        # A thumbnail carries its URL in 'src' once loaded, or in
        # 'data-src' while still pending lazy load — accept either.
        image_urls = []
        for img in images:
            img_url = img.get_attribute('src') or img.get_attribute('data-src')
            if img_url and img_url.startswith('http'):
                image_urls.append(img_url)

        return image_urls

    except Exception as e:
        # Best-effort scraper: report browser/network failures and
        # return an empty result instead of propagating to the caller.
        print(f'Error occurred: {e}')
        return []
    finally:
        # Always release the browser process, even on failure.
        driver.quit()

def save_urls_to_file(urls, filename):
    """Write one URL per line to *filename* (UTF-8, overwriting).

    Args:
        urls: List of URL strings.
        filename: Destination path.

    Errors are reported to stdout rather than raised, so a single
    failed write does not abort a batch run.
    """
    try:
        with open(filename, 'w', encoding='utf-8') as f:
            for url in urls:
                f.write(url + '\n')
        # Bug fix: the message previously printed the literal
        # placeholder '(unknown)' instead of the destination path.
        print(f'Saved {len(urls)} URLs to {filename}')
    except Exception as e:
        print(f'Error saving file: {e}')

def process_keywords_file(keywords_file):
    """Run an image-URL search for every keyword listed in a file.

    The file must contain one keyword per line; blank lines are
    skipped. Each keyword's results are written to
    out/<keyword>_baidu_image_urls.txt.

    Args:
        keywords_file: Path to the keywords list.

    Returns:
        bool: False if the file does not exist, True otherwise.
    """
    if not os.path.exists(keywords_file):
        print(f'Keywords file {keywords_file} not found')
        return False

    out_dir = create_output_dir()

    # Collect non-empty, whitespace-trimmed lines as keywords.
    keywords = []
    with open(keywords_file, 'r', encoding='utf-8') as f:
        for raw_line in f:
            word = raw_line.strip()
            if word:
                keywords.append(word)

    for keyword in keywords:
        print(f'Processing keyword: {keyword}')
        urls = get_image_urls(keyword)
        target = os.path.join(out_dir, f'{keyword}_baidu_image_urls.txt')
        if urls:
            save_urls_to_file(urls, target)
        else:
            print(f'No image URLs found for keyword: {keyword}')

    return True

if __name__ == '__main__':
    # Two run modes:
    #   1. Single keyword: python baidu_image_scraper.py <keyword>
    #   2. Batch:          python baidu_image_scraper.py --keywords keywords.txt
    args = sys.argv[1:]

    if len(args) == 1:
        # Single-keyword mode.
        keyword = args[0]
        out_dir = create_output_dir()
        output_file = os.path.join(out_dir, f'{keyword}_baidu_image_urls.txt')

        print(f'Searching for images with keyword: {keyword}')
        image_urls = get_image_urls(keyword)

        if image_urls:
            save_urls_to_file(image_urls, output_file)
        else:
            print('No image URLs found')
    elif len(args) == 2 and args[0] == '--keywords':
        # Batch mode: read keywords from the given file.
        process_keywords_file(args[1])
    else:
        print('Usage:')
        print('Single keyword mode: python baidu_image_scraper.py <keyword>')
        print('Batch mode: python baidu_image_scraper.py --keywords <keywords_file>')
        sys.exit(1)