import pandas as pd
import asyncio
from concurrent.futures import ThreadPoolExecutor
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import os
import logging

# Configure module-level logging (side effect at import time): INFO level,
# timestamped "time - level - message" format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def capture_full_screen_screenshot(driver, file_path):
    """Save a 1920x1080 screenshot to *file_path*, then restore the window size.

    Temporarily resizes the browser window to 1920x1080 so the capture has a
    consistent viewport, and restores the original dimensions afterwards.

    Args:
        driver: A selenium WebDriver (or compatible object exposing
            ``get_window_size``/``set_window_size``/``save_screenshot``).
        file_path: Destination path for the PNG screenshot.
    """
    window_size = driver.get_window_size()
    driver.set_window_size(1920, 1080)
    try:
        driver.save_screenshot(file_path)
    finally:
        # Restore the caller's window size even if the screenshot fails;
        # the original code leaked the resized window on error.
        driver.set_window_size(window_size['width'], window_size['height'])

def search_and_capture(url, search_term):
    """Log the start of a search on *url* for *search_term*.

    NOTE(review): this is currently a stub — it only logs and performs no
    browsing or screenshot; ``main`` treats its return value as an optional
    screenshot path, so it explicitly returns ``None`` here.

    Args:
        url: Target page URL.
        search_term: Term to search for on the page.

    Returns:
        None (no screenshot is produced yet).
    """
    # Lazy %-style args: the message is only formatted if the record is emitted.
    logging.info("访问网址：%s 开始搜索：%s", url, search_term)
    return None

async def main():
    """Read (url, search) rows from urls.csv and process them concurrently.

    Fans each row out to ``search_and_capture`` on a thread pool via
    ``run_in_executor`` so the coroutine actually awaits the work — the
    original body never awaited anything and blocked the event loop on
    ``future.result()``. Logs the screenshot path for each truthy result.

    Raises:
        FileNotFoundError: if ``urls.csv`` does not exist.
    """
    df = pd.read_csv('urls.csv')  # expected columns: 'url', 'search'
    loop = asyncio.get_running_loop()

    with ThreadPoolExecutor(max_workers=5) as executor:
        tasks = [
            loop.run_in_executor(executor, search_and_capture, row['url'], row['search'])
            for _, row in df.iterrows()
        ]
        for result in await asyncio.gather(*tasks):
            if result:
                logging.info("截图保存在：%s", result)

if __name__ == "__main__":
    asyncio.run(main())