import asyncio
import logging
import os
import time
from urllib.parse import urlparse

import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

# Configure logging: INFO level, timestamped "time - level - message" format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def capture_full_screen_screenshot(driver, file_path):
    """Save a 1920x1080 screenshot to *file_path*, restoring the window size.

    Args:
        driver: a selenium WebDriver (any object exposing get_window_size,
            set_window_size and save_screenshot).
        file_path: destination path for the PNG screenshot.
    """
    original_size = driver.get_window_size()
    driver.set_window_size(1920, 1080)
    try:
        driver.save_screenshot(file_path)
    finally:
        # Restore the caller's window size even if the screenshot fails
        # (the original left the window resized on error).
        driver.set_window_size(original_size['width'], original_size['height'])

def search_and_capture(driver, url, search_term):
    """Open *url*, submit *search_term* in the input named 'q', and save a
    screenshot of the result page.

    The screenshot is written to ``<cwd>/<hostname with '.' -> '_'>/
    <search_term>.png``.

    Args:
        driver: a selenium WebDriver instance.
        url: page to open; must include a scheme (e.g. ``https://``).
        search_term: text typed into the search box.

    Returns:
        str: path of the saved screenshot.
    """
    driver.get(url)
    logging.info(f"访问网址：{url}")
    # Fixed pause to let the page finish loading.
    # NOTE(review): a WebDriverWait with an expected condition would be
    # more reliable than a hard-coded sleep.
    time.sleep(2)

    search_box = driver.find_element(By.NAME, 'q')
    search_box.send_keys(search_term)
    search_box.send_keys(Keys.RETURN)
    logging.info(f"搜索词：{search_term}")
    # implicitly_wait only sets the timeout for FUTURE find_element calls;
    # it does not pause here for the results page to load.
    driver.implicitly_wait(6)

    # Derive the folder name from the URL's hostname; urlparse is more
    # robust than splitting on '/'.
    folder_name = urlparse(url).netloc.replace('.', '_')
    folder_path = os.path.join(os.getcwd(), folder_name)
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(folder_path, exist_ok=True)

    screenshot_path = os.path.join(folder_path, f"{search_term}.png")
    capture_full_screen_screenshot(driver, screenshot_path)
    logging.info(f"截图保存在：{screenshot_path}")
    return screenshot_path

async def main():
    """Read url/search pairs from urls.csv and capture one screenshot per row.

    Expects a ``urls.csv`` in the working directory with ``url`` and
    ``search`` columns.

    NOTE(review): the body contains no ``await`` — the function stays async
    only so the existing ``asyncio.run(main())`` entry point keeps working.
    """
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    driver = webdriver.Chrome(service=Service(), options=chrome_options)
    try:
        df = pd.read_csv('urls.csv')
        for index, row in df.iterrows():
            url = row['url']
            search_term = row['search']  # column names must match the CSV header
            logging.info(f'正在处理第{index+1}行，网址为{url}，搜索词为{search_term}...')
            search_and_capture(driver, url, search_term)
    finally:
        # Always release the browser, even if a row fails — the original
        # leaked the WebDriver by leaving quit() commented out.
        driver.quit()

# Script entry point: drive the async main() via the asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())