import os
import time
import csv
from selenium import webdriver
from selenium.common import TimeoutException, NoSuchElementException, ElementClickInterceptedException, \
    StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import ast
import csv


def wait_for_clickable(driver, by, value, timeout=10):
    """Wait until the element located by (by, value) is clickable, then return it.

    Args:
        driver: active Selenium WebDriver instance.
        by: locator strategy (e.g. ``By.XPATH``). NOTE: the original
            implementation ignored this argument and always used XPATH;
            it is now honored.
        value: locator value matching the chosen strategy.
        timeout: maximum seconds to wait before giving up.

    Returns:
        The clickable WebElement.

    Raises:
        TimeoutException: if no matching element becomes clickable in time.
    """
    # Bug fix: pass the caller-supplied strategy instead of hard-coding
    # By.XPATH. All existing call sites pass By.XPATH, so behavior there
    # is unchanged.
    return WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((by, value))
    )


def safe_get_text(driver, xpath, default='N/A'):
    """Fetch the visible text of the element located by ``xpath``.

    Falls back to ``default`` when the element is absent from the page or
    has gone stale between lookup and text access.
    """
    try:
        text = driver.find_element(By.XPATH, xpath).text
    except (NoSuchElementException, StaleElementReferenceException):
        # Missing/stale cells are expected on sparse result rows; report
        # the placeholder rather than aborting the scrape.
        text = default
    return text


def scrape_data(driver, rows=50):
    """Scrape one CNKI results page currently shown in ``driver``.

    Reads title (column 2), authors (column 3) and publication date
    (column 7) from each row of the results grid.

    Args:
        driver: WebDriver already displaying a results table
            (element id ``gridTable``).
        rows: number of table rows to read per page. Defaults to 50,
            the page size the original code assumed.

    Returns:
        A list of ``[title, authors, pub_date]`` lists; cells that cannot
        be read are reported as ``'N/A'`` by ``safe_get_text``.
    """
    # Single template for the repeated per-cell XPaths.
    cell_xpath = '//*[@id="gridTable"]/div/div[2]/table/tbody/tr[{row}]/td[{col}]'
    data = []
    for row in range(1, rows + 1):
        title = safe_get_text(driver, cell_xpath.format(row=row, col=2) + '/a')
        authors = safe_get_text(driver, cell_xpath.format(row=row, col=3))
        pub_date = safe_get_text(driver, cell_xpath.format(row=row, col=7))
        data.append([title, authors, pub_date])
    return data


def setup_driver_with_debugger():
    """Attach to an already-running Chrome instance that exposes a remote
    debugging port on 127.0.0.1:9527 and return the WebDriver handle."""
    # Configure Chrome options to reuse the existing debug session instead
    # of launching a fresh browser.
    chrome_options = Options()
    chrome_options.add_experimental_option("debuggerAddress", "127.0.0.1:9527")
    return webdriver.Chrome(options=chrome_options)


def main():
    """
    先配置环境变量，用cmd打开浏览器,命令为：chrome.exe --remote-debugging-port=9527 --user-data-dir="D:\selenium_chrome_9527"
    :return:
    """
    # 这里类似于03.py 需要提前打开爬取的界面，

    driver = setup_driver_with_debugger()

    # 创建文件夹保存结果
    output_dir = 'result_CNKI_ZL'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    """
    进入页面开始爬取的相关工作
    """
    current_page = 0
    total_page = 61
    all_data = []
    while True:
        current_page += 1
        # 爬取这一页的内容
        all_data.append(scrape_data(driver))
        print(all_data[-1])
        if current_page == 1:
            next_page_bnt = wait_for_clickable(driver, By.XPATH, '//*[@id="countPageDiv"]/span[3]')
        else:
            next_page_bnt = wait_for_clickable(driver, By.XPATH, '//*[@id="countPageDiv"]/span[4]')
        next_page_bnt.click()
        if current_page >= total_page:
            break

    # 所有页面爬取完成后，写入CSV文件
    with open(f'{output_dir}/output.csv', 'w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerows(all_data)
    print(f'output.csv爬取完毕！')

    # driver.close()


if __name__ == "__main__":
    main()
