import os
import time
import csv
from selenium import webdriver
from selenium.common import TimeoutException, NoSuchElementException, ElementClickInterceptedException, \
    StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup

# Mapping from each teacher's Chinese name to the English name variant(s)
# used when searching Web of Science; multiple variants are joined with "or"
# so a single query matches any of them.
# NOTE(review): the trailing numeric comments look like expected result
# counts per teacher — confirm with the data owner.
teacher_name = {
    # The teachers' English names must be known in advance.
    '刘三女牙': 'Liu, SanYa',  # 49
    '吴砥': 'Di Wu or Wu,D',  # 44
    '余新国': 'Xinguo Yu',  # 51
    '陈增照': 'Zengzhao Chen or Chen Zengzhao ',  # 53
    '陈靓影': 'Jingying Chen',  # 50
    '易宝林': 'Yi Baolin or Baolin Yi or Yi,BL',  # 28
    '何秀玲': 'Xiuling He or He Xiuling  or He XL',  # 32
    '杜旭': 'Xu Du or Du Xu',  # 42
    '张昭理': 'Zhaoli Zhang',  # 44
    '黄涛': 'Tao Huang or Huang Tao',  # 48
    '吴龙凯': 'Wu Longkai or Wu,LK',  # 19
    '陈矛': 'Chen Mao or Mao Chen',  # 20
    '王泰': 'Wang Tai or Wang,T(Wang Tai) or Tai Wang',  # 15
    '张维': 'Wei Zhang',  # 60
    '周东波': 'Zhou Dongbo or Dongbo Zhou or Zhou,DB',  # 24
    '孙建文': 'Sun Jianwen or Jianwen Sun or Sun,JW(Sun jianwen)',  # 51
    '廖盛斌': 'Liao Shengbin or Shenbin Liao or Liao,SB(Liao Shenbin)',  # 8
    '朱晓亮': 'Zhu Xiaoliang or Xiaoliang Zhu or Zhu,XL(Zhu Xiaoliang)',  # 25
    '钟正': 'Zhong Zheng or Zheng Zhong or Zhong,Z(Zhong Zheng)',  # 17
    '戴志诚': 'Dai Zhicheng or Zhicheng Dai or Dai,ZC(Dai Zhicheng)',  # 28
    '舒江波': 'Shu Jiangbo or Jiangbo Shu or Shu,JB(Shu Jiangbo)',  # 41
    '吴珂': 'Wu Ke or Ke Wu',  # 14
    '刘乐元': 'Liu Leyuan or Leyuan Liu or Liu,LY(Liu Leyuan)',  # 37
    '张坤': 'Zhang Kun or Kun Zhang or Zhang,K(Zhang Kun)',  # 38
    '方莹': 'Fang Ying or Ying Fang or Fang,Y(Fang Ying)',  # 5
    '孙超': 'Sun Chao or Chao Sun or Sun,C(Sun Chao)',  # 17
    '何彬': 'He Bin or Bin He or He,B(He Bin)',  # 29
    '彭世新': 'Peng Shixin or Peng,SX(Peng Shixin) or Shixin Peng',  # 13
    '粟柱': 'Su Zhu or Zhu Su or Su,Z(Su Zhu)',  # 29
    '朱莎': 'Zhu Sha or Sha Zhu or Zhu,S(Zhu Sha)',  # 42
    '刘智': 'Liu Zhi or Zhi Liu or Liu,Z(Liu Zhi)',  # 89
    '赵亮': 'Zhao Liang or Liang Zhao or Zhao,L(Zhao Liang)',  # 52
    '刘海': 'Liu Hai or Liu,H(Liu Hai)',  # 86
    '张浩': 'Zhang Hao or Hao Zhang or Zhang,H(Zhang Hao)',  # 36
    '李浩': 'Hao Li',  # 39
    '张立山': 'Zhang Lishan or Zhang,LS(Zhang Lishan)',  # 13
    '曾致中': 'Zeng Zhizhong or Zhizhong Zeng or Zeng,ZZ(Zeng Zhizhong) or Zeng Z.(Zeng Zhizhong)',  # 8
    '陈敏': 'Chen Min or Chen,M(Chen Min)',  # 46
    '李卿': 'Qing Li or Li Qing or Li,Q(Li Qing)',  # 56
    '蔡畅': 'Cai Chang or Cai,C(Cai Chang) or Chang Cai',  # 15
    '石映辉': 'Shi Yinghui',  # 36
    '张婷': 'Zhang Ting',  # 25
    '彭晛': 'Peng Xian or Peng,X(Peng Xian) or Xian Peng',  # 27
    '吴晨': 'Wu Chen or Wu,C(Wu Chen)',  # 6
    '李亚婷': 'Li Yating or Li,YT(Li Yating)',  # 10
    '李睿': 'Li Rui or Rui Li or Li,R(Li Rui)',  # 80
    '陈旭': 'Chen Xu',  # 14
    '陈思菁': 'Chen Sijing',  # 6
    '梁如霞': 'Liang Ruxia',  # 9
    '周子荷': 'Zhou Zihe or Zhou,ZH(Zhou Zihe)',  # 1
    '周驰': 'Zhou Chi or Zhou,C(Zhou Chi)',  # 14
    '汤胜兵': 'Tang Shengbing or Tang,SB(Tang Shenbing)',  # 1
    '万仟': 'Wan Qian',  # 4
    '沈筱譞': 'Shen XX',  # 19
    '严中华': 'Yan Zhonghua',  # 3
    '徐建': 'Xu Jian',  # 22
    '李洋洋': 'Li Yangyang',  # 7
    '尉小荣': 'Wei Xiaorong or Xiaorong Wei or Wei,XR',  # 0
    '彭翕成': 'Peng Xicheng',  # 11
}


def setup_driver(driver_path):
    """Launch a fresh Chrome instance driven by the chromedriver at *driver_path*."""
    return webdriver.Chrome(
        service=Service(executable_path=driver_path),
        options=Options(),
    )


def wait_for_element(driver, by, value, timeout=10):
    """Wait until the element located by ``(by, value)`` is visible and return it.

    BUG FIX: the *by* argument was previously ignored and ``By.XPATH`` was
    always used. The locator strategy is now honoured; all current callers
    already pass ``By.XPATH``, so their behaviour is unchanged.

    Raises ``TimeoutException`` if the element is not visible within *timeout* seconds.
    """
    return WebDriverWait(driver, timeout).until(
        EC.visibility_of_element_located((by, value))
    )


def wait_for_clickable(driver, by, value, timeout=10):
    """Wait until the element located by ``(by, value)`` is clickable and return it.

    BUG FIX: the *by* argument was previously ignored and ``By.XPATH`` was
    always used. The locator strategy is now honoured; all current callers
    already pass ``By.XPATH``, so their behaviour is unchanged.

    Raises ``TimeoutException`` if the element is not clickable within *timeout* seconds.
    """
    return WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((by, value))
    )


def safe_get_text(driver, xpath, default='N/A'):
    """Return the text of the element at *xpath*, or *default* when the
    element is missing or has gone stale."""
    result = default
    try:
        result = driver.find_element(By.XPATH, xpath).text
    except (NoSuchElementException, StaleElementReferenceException):
        pass  # fall back to the caller-supplied default
    return result


def scrape_data(driver):
    """Extract the key fields from the currently displayed full-record page.

    Returns a list ``[title, authors, pub_date, citations, journal]``.
    Missing fields fall back to ``'N/A'``; citations fall back to ``'0'``
    because a paper may legitimately have zero citations.
    """
    field_specs = [
        # (xpath, fallback value)
        ('//*[@id="FullRTa-fullRecordtitle-0"]', 'N/A'),                                   # title
        ('//*[@id="SumAuthTa-MainDiv-author-en"]/span', 'N/A'),                            # authors
        ('//*[@id="FullRTa-pubdate"]', 'N/A'),                                             # publication date
        ('//*[@id="FullRRPTa-wos-citation-network-times-cited-count-link-19"]', '0'),      # times cited
        ('//*[@id="snMainArticle"]/div[6]/span/app-jcr-sidenav/mat-sidenav-container/mat-sidenav-content/span/a/span', 'N/A'),  # journal
    ]
    return [safe_get_text(driver, xpath, fallback) for xpath, fallback in field_specs]


def setup_driver_with_debugger():
    """Attach to an already-running Chrome that exposes its DevTools
    debugging interface on 127.0.0.1:9527."""
    opts = Options()
    opts.add_experimental_option("debuggerAddress", "127.0.0.1:9527")
    return webdriver.Chrome(options=opts)


def error_search(driver):
    """Return True when the results page shows the search-error banner
    (i.e. the query produced no results)."""
    banners = driver.find_elements(
        By.CSS_SELECTOR, "div.search-error.error-code.light-red-bg.ng-star-inserted"
    )
    return len(banners) > 0


def main():
    """Scrape Web of Science publication records for each teacher in ``teacher_name``.

    Manual setup required beforehand:
    1. Configure the PATH, then start Chrome from cmd with a debugging port:
       chrome.exe --remote-debugging-port=9527 --user-data-dir="D:\selenium_chrome_9527"
       If scraping later reports "Please close your session and start a new one.",
       delete the D:\selenium_chrome_9527 folder and rerun the command above to
       relaunch the browser automatically.
       Details: https://blog.csdn.net/weixin_45081575/article/details/112621581
    2. Log in to the CCNU one-stop portal, open the library's Web of Science (SCI)
       foreign-language database, close every other tab so only the Web of Science
       page remains, then run this program.
    """
    driver = setup_driver_with_debugger()

    # Sanity check that we attached to the right tab; comment out if noisy.
    print(f'当前界面为:{driver.current_url}')

    # Directory for the per-teacher CSV output files.
    output_dir = 'result_sci'
    os.makedirs(output_dir, exist_ok=True)

    # Teachers whose search returned nothing — either our preset English name
    # is wrong, or the teacher has no SCI publications.
    no_results = []
    for Chinese_name, english_name in teacher_name.items():
        print(f"正在爬取{Chinese_name}_{english_name}")
        # Reload to get a clean search form for each teacher.
        driver.refresh()

        # Enter the author name.
        wait_for_element(driver, By.XPATH, '//*[@id="mat-input-0"]')
        name_box = driver.find_element(By.XPATH, '//*[@id="mat-input-0"]')
        name_box.clear()
        name_box.send_keys(english_name)

        # Enter the affiliation.
        wait_for_element(driver, By.XPATH, '//*[@id="mat-input-1"]')
        org_box = driver.find_element(By.XPATH, '//*[@id="mat-input-1"]')
        org_box.clear()
        org_box.send_keys('Central China Normal University')

        # Submit the search.
        search_button = wait_for_clickable(driver, By.XPATH, '//*[@id="snSearchType"]/div[4]/button[2]/span[1]')
        search_button.click()
        time.sleep(1)

        if error_search(driver):
            no_results.append([Chinese_name, english_name])
            print('无搜索结果')
            continue

        # Open the first result to enter the record-by-record full view.
        first_paper = wait_for_clickable(driver, By.XPATH,
                                         '/html/body/app-wos/main/div/div/div[2]/div/div/div[2]/app-input-route/app-base-summary-component/div/div[2]/app-records-list/app-record[1]/div/div/div[2]/div[1]/app-summary-title/h3/a')
        first_paper.click()

        # Total number of records for this author, shown by the pager.
        # NOTE(review): assumes the span text is a bare integer — confirm.
        total_page = int(driver.find_element(By.XPATH,
                                             '/html/body/app-wos/main/div/div/div[2]/div/div/div[2]/app-input-route/app-full-record-home/div[1]/app-page-controls/div/form/div/span').text)

        all_data = []
        for current_page in range(1, total_page + 1):
            all_data.append(scrape_data(driver))
            # BUG FIX: only click "next" while another record remains; the old
            # code clicked it once more on the final record before breaking.
            if current_page < total_page:
                next_page_btn = wait_for_clickable(driver, By.XPATH,
                                                   '/html/body/app-wos/main/div/div/div[2]/div/div/div[2]/app-input-route/app-full-record-home/div[1]/app-page-controls/div/form/div/button[2]/span[1]')
                next_page_btn.click()

        # All records scraped — write this teacher's CSV.
        # BUG FIX: the header previously listed a 'References' column, but
        # scrape_data() returns only 5 fields, so columns were misaligned.
        with open(f'{output_dir}/output_{english_name}.csv', 'w', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            writer.writerow(['Title', 'Authors', 'Publication Date', 'Citations', 'QiKan'])
            writer.writerows(all_data)
        print(f'output_{english_name}.csv爬取完毕！')

        # Return to the search page for the next teacher.
        search_back_btn = wait_for_clickable(driver, By.XPATH, '//*[@id="breadcrumb"]/ul/li[1]/div/a/span/span')
        search_back_btn.click()
        time.sleep(1)

    # Record every teacher whose search produced no results.
    with open(f'{output_dir}/no_result_teachers.csv', 'w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(['Chinese Name', 'English Name'])
        writer.writerows(no_results)
    # driver.close()  # deliberately left open: the debug browser session is reused


# Script entry point: only run the scraper when executed directly.
if __name__ == "__main__":
    main()
