import os
import time
import csv
from selenium import webdriver
from selenium.common import TimeoutException, NoSuchElementException, ElementClickInterceptedException
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup


def setup_driver(driver_path):
    """Create and return a Chrome WebDriver using the given chromedriver path."""
    chrome_options = Options()
    chrome_service = Service(executable_path=driver_path)
    return webdriver.Chrome(service=chrome_service, options=chrome_options)


def read_teacher_names(csv_path):
    """Read the teacher-name column from a CSV file.

    Args:
        csv_path: Path to a CSV file with a header row containing a
            ``Name`` column.

    Returns:
        list[str]: The values of the ``Name`` column, in file order.

    Note:
        The file is opened with ``utf-8-sig`` so a UTF-8 BOM (written by
        Excel and some exporters) is stripped transparently; the original
        code had to look the column up under the mangled key
        ``'\\ufeffName'`` instead.
    """
    with open(csv_path, newline='', encoding='utf-8-sig') as csvfile:
        reader = csv.DictReader(csvfile)
        return [row['Name'] for row in reader]


def wait_for_element(driver, by, value, timeout=10):
    """Wait until the element located by (by, value) is visible and return it.

    Args:
        driver: Active Selenium WebDriver.
        by: Locator strategy (e.g. ``By.XPATH``). The original version
            ignored this parameter and hard-coded ``By.XPATH``; it is now
            honored. All existing callers pass ``By.XPATH``, so behavior
            for them is unchanged.
        value: Locator value matching the strategy.
        timeout: Seconds to wait before ``TimeoutException`` is raised.

    Returns:
        The located, visible WebElement.
    """
    return WebDriverWait(driver, timeout).until(
        EC.visibility_of_element_located((by, value))
    )


def wait_for_clickable(driver, by, value, timeout=10):
    """Wait until the element located by (by, value) is clickable and return it.

    Args:
        driver: Active Selenium WebDriver.
        by: Locator strategy (e.g. ``By.XPATH``). The original version
            ignored this parameter and hard-coded ``By.XPATH``; it is now
            honored. All existing callers pass ``By.XPATH``, so behavior
            for them is unchanged.
        value: Locator value matching the strategy.
        timeout: Seconds to wait before ``TimeoutException`` is raised.

    Returns:
        The located, clickable WebElement.
    """
    return WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((by, value))
    )


def check_university(label_text, university_name, teacher_name):
    """Check whether a teacher's affiliation label mentions the target university.

    Returns the label text itself when it contains ``university_name``;
    otherwise returns a marker string flagging the teacher as not belonging
    to the target school. Prints a progress message either way.
    """
    if university_name not in label_text:
        print(f'{teacher_name}不是{university_name}的')
        return f"{teacher_name}非华师"
    print(label_text)
    return label_text


def scrape_data(driver):
    """Extract the current results table into a list of rows.

    Parses the page source with BeautifulSoup, finds the first ``<tbody>``,
    and returns one list of stripped cell strings per ``<tr>``. Returns an
    empty list when no ``<tbody>`` is present.
    """
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    body = soup.find('tbody')
    if body is None:
        return []
    return [
        [cell.get_text(strip=True) for cell in row.find_all('td')]
        for row in body.find_all('tr')
    ]


def main():
    """Scrape CNKI paper listings for each teacher produced by 01.py.

    For every name in ``result_CNKI/result/teachers.csv``: type the name
    into the CNKI advanced-search author box, pick the entry whose
    affiliation matches ``teacherschool`` from the suggestion dropdown,
    run the search, page through all results, and write them to
    ``result_CNKI/output_<name>.csv``. Teachers whose output file already
    exists in the results folder are skipped.
    """
    teacherschool = '华中师范大学'  # change to the target teacher's institution

    driver_path = 'C:……/Python/Python39/Scripts/chromedriver.exe'  # change to your chromedriver install path
    driver = setup_driver(driver_path)
    driver.get('https://kns.cnki.net/kns8s/AdvSearch?classid=WD0FTY92')
    result_directory = 'result_CNKI'
    # Create the results folder BEFORE anything reads from or writes into it
    # (the original only created it after reading teachers.csv from inside it).
    if not os.path.exists(result_directory):
        os.makedirs(result_directory)
    teacher_names = read_teacher_names(f'{result_directory}/result/teachers.csv')
    # Start scraping.
    for teacher_name in teacher_names:
        # Skip teachers whose output file already exists in the results folder.
        already_crawled = any(teacher_name in filename for filename in os.listdir(result_directory))
        if already_crawled:
            print(f"{teacher_name} 的数据已经被爬取过了，跳过此次爬取。")
            continue

        print(f"开始爬取 {teacher_name} 的数据...")
        try:
            wait_for_element(driver, By.XPATH, '//*[@id="gradetxt"]/dd[2]/div[2]/input')
            input_box = driver.find_element(By.XPATH, '//*[@id="gradetxt"]/dd[2]/div[2]/input')
            input_box.clear()  # clear any previous teacher's name
            input_box.send_keys(teacher_name)
            time.sleep(1)
            # Walk the affiliation-suggestion dropdown looking for the target school.
            found = False
            scroll_container_xpath = '//*[@id="gradetxt-2"]/div/ul'
            try:
                li_index = 1
                while not found:
                    try:
                        label_xpath = f'//*[@id="gradetxt-2"]/div/ul/li[{li_index}]/div/label'
                        label_text = wait_for_element(driver, By.XPATH, label_xpath).text
                        if teacherschool in label_text:
                            input_xpath = f'//*[@id="gradetxt-2"]/div/ul/li[{li_index}]/div/label/input'
                            next_button = wait_for_clickable(driver, By.XPATH, input_xpath)
                            next_button.click()
                            found = True
                        else:
                            li_index += 1
                            # Scroll the dropdown so the next candidate is in view.
                            scroll_container = driver.find_element(By.XPATH, scroll_container_xpath)
                            scroll_to_element = driver.find_element(
                                By.XPATH, f'//*[@id="gradetxt-2"]/div/ul/li[{li_index}]')
                            driver.execute_script(
                                "arguments[0].scrollTop = arguments[1].offsetTop;",
                                scroll_container, scroll_to_element)
                    except (NoSuchElementException, TimeoutException):
                        # wait_for_element raises TimeoutException (not
                        # NoSuchElementException) when the entry is absent,
                        # so both must be caught here.
                        current_page_xpath = '//*[@id="gradetxt-2"]/div/div[3]/b'
                        total_pages_xpath = '//*[@id="gradetxt-2"]/div/div[3]/em'
                        current_page = driver.find_element(By.XPATH, current_page_xpath).text
                        total_pages = driver.find_element(By.XPATH, total_pages_xpath).text
                        if current_page == total_pages:
                            print("已到达最后一页。")
                            break
                        else:
                            next_page_button = driver.find_element(By.XPATH, '//*[@id="gradetxt-2"]/div/div[3]/a[2]')
                            driver.execute_script("arguments[0].click();", next_page_button)
                            li_index = 1  # restart from the first entry of the new page
                            # Re-locate the container here: the original referenced a
                            # possibly-unassigned local and could raise NameError.
                            scroll_container = driver.find_element(By.XPATH, scroll_container_xpath)
                            driver.execute_script("arguments[0].scrollTop = 0;", scroll_container)
                    except ElementClickInterceptedException:
                        # The checkbox was obscured; scroll it into view and retry.
                        next_button = driver.find_element(By.XPATH, input_xpath)
                        driver.execute_script("arguments[0].scrollIntoView(true);", next_button)
                        next_button.click()
            except Exception as e:
                print(f"处理 {teacher_name} 时发生错误：{e}")

            # Click elsewhere to dismiss the dropdown overlay.
            clear_obstruction_button = wait_for_clickable(driver, By.XPATH,
                                                          '//*[@id="ModuleSearch"]/div[1]/div/div[2]/ul/li[5]')
            clear_obstruction_button.click()
            if found:
                search_button = wait_for_clickable(driver, By.XPATH,
                                                   '//*[@id="ModuleSearch"]/div[1]/div/div[2]/div/div[1]/div[1]/div[2]/div[3]/input')
                search_button.click()

                # Collect every result page for this teacher.
                all_data = []
                while True:
                    all_data.extend(scrape_data(driver))
                    try:
                        next_page_button = wait_for_clickable(driver, By.XPATH, '//*[@id="PageNext"]')
                        if next_page_button.is_enabled():
                            next_page_button.click()
                            time.sleep(2)  # wait for the next page to load
                        else:
                            print("已到达最后一页。")
                            break
                    except TimeoutException:
                        print("已到达最后一页。")
                        break
                    except Exception as e:
                        print("发生错误：", str(e))
                        break

                # Save into result_directory so the already_crawled check above
                # (which lists result_directory) can find it on the next run;
                # the original wrote to the working directory instead.
                output_path = os.path.join(result_directory, f'output_{teacher_name}.csv')
                with open(output_path, 'w', newline='', encoding='utf-8') as file:
                    writer = csv.writer(file)
                    for row in all_data:
                        writer.writerow(row[:-1])  # drop the trailing action column

                print(f"{output_path}已完成！")

                # Re-open the search panel before moving on to the next teacher.
                openSearchButton = wait_for_clickable(driver, By.XPATH,
                                                      '//*[@id="ModuleSearch"]/div[1]/div/div[2]/div/div[2]/a[2]')
                openSearchButton.click()
                time.sleep(2)  # wait for the panel to load
        except Exception as e:
            print(f"处理 {teacher_name} 时发生错误：{e}")
    driver.quit()


if __name__ == "__main__":
    main()
