# -*- coding: utf-8 -*-

"""
File: WOS_spider.py
Author: Dramwig
Email: dramwig@outlook.com
Date: 2024-02-27
Version: 1.6

Description: This script uses Selenium and BeautifulSoup to scrape detailed paper information from Web of Science (WOS) website.
It navigates through each paper's detail page, extracts key information such as title, citation count, country, journal, etc., 
and saves the collected data into a CSV file.

Please note that this script is intended for educational purposes only, and you should abide by the terms of service and usage policies 
of the Web of Science when using it or any derivative work.

"""
import os
import time

import keyboard
import pandas as pd
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


# pip install
# import PyExecJS


# 解析html
def parse_html(html):
    soup = BeautifulSoup(html, 'html.parser')

    # 创建一个空的字典
    data_dict = {}
    try:
        class_title = soup.find(id="HiddenSecTa-accessionNo")
        data_dict['accessionNo'] = class_title.text.strip()
        # print('\t' + class_title.text.strip())
    except:
        # 抛出异常
        print("获取ID失败")
        raise ValueError("获取ID失败")

    try:
        containers = soup.find_all('div', class_='cdx-two-column-grid-container')
        infoData = {}
        for container in containers:
            # 在这个容器内找到所有的标签和数据
            labels = container.find_all(class_='cdx-grid-label')
            datas = container.find_all(class_='cdx-grid-data')
            label = labels[0].text.strip()
            data_texts = [data.text.strip() for data in datas]  # 提取数据列表中的文本
            text = '\n'.join(data_texts)  # 将文本连接成一个字符串，使用换行符分隔

            # 存储到字典中
            infoData[label] = text

        # 提取摘要、关键词、类型、来源
        data_dict['abstractInfo'] = infoData['Abstract']
        data_dict['keywords'] = infoData['Keywords']
        data_dict['documentType'] = infoData['Document Type']
        data_dict['source'] = infoData['Source']
        # 日期、作者
        data_dict['indexed'] = infoData['Indexed']
        data_dict['by'] = infoData['By']
        data_dict['authorInformation'] = infoData['Author Information']
        data_dict['classification'] = infoData['Categories/ Classification']
        # 去掉重复数据
        infoData.pop('Abstract')
        infoData.pop('Keywords')
        infoData.pop('Document Type')
        infoData.pop('Source')
        infoData.pop('Indexed')
        infoData.pop('By')
        infoData.pop('Author Information')
        infoData.pop('Categories/ Classification')
        data_dict['infoData'] = infoData

    except:
        print("解析容器失败")

    try:
        class_title = soup.find(class_="title text--large cdx-title")
        data_dict['title'] = class_title.text.strip()
        # print('\t' + class_title.text.strip())
    except:
        print("获取标题失败")

    # 引用论文数量
    try:
        class_citation = soup.find(class_="mat-tooltip-trigger medium-link-number link ng-star-inserted")
        data_dict['citation'] = class_citation.text.strip()
    except:
        data_dict['citation'] = '0'

    # 作者地址
    try:
        class_addresses = soup.find('span', class_='ng-star-inserted', id='FRAOrgTa-RepAddressFull-0')
        # print('\t' + class_addresses.text.strip())
        data_dict['country'] = class_addresses.text.split(',')[-1].strip()
    except:
        try:
            class_addresses = soup.find('span', class_='value padding-right-5--reversible')
            # print('\t查询规则2：' + class_addresses.text.strip())
            data_dict['country'] = class_addresses.text.split(',')[-1].strip()
        except:
            print("获取国家失败")
    #
    # try:
    #     class_journal = soup.find(
    #         class_="mat-focus-indicator mat-tooltip-trigger font-size-14 summary-source-title-link remove-space no-left-padding mat-button mat-button-base mat-primary font-size-16 ng-star-inserted")
    #     data_dict['journal'] = class_journal.text.strip()
    # except:
    #     print("获取期刊失败")

    try:
        input_box = soup.find(class_='wos-input-underline page-box', id="snNextPageTop")  # 获取包含输入框的标签
        index = int(input_box['aria-label'].split()[-1].replace(",", ""))
    except:
        print("获取页码失败")

    return index, data_dict


def search(keyword, cookie):
    """Run a Publication/Source-Titles search on WOS and open the first hit.

    Uses the module-level ``driver``.

    Args:
        keyword: Publication/source title to search for.
        cookie: Truthy on the first search of a browser session -- the
            cookie banner and field-selector steps only exist once per
            session, so they are skipped on subsequent calls.
    """
    driver.get('https://webofscience.clarivate.cn/wos/alldb/basic-search')
    if cookie:
        # Dismiss the "manage cookies" banner.
        WebDriverWait(driver, 10).until(
            EC.visibility_of_element_located((By.CSS_SELECTOR, '#onetrust-close-btn-container > button'))
        )
        time.sleep(5)
        driver.find_element(By.CSS_SELECTOR, '#onetrust-close-btn-container > button').click()

        WebDriverWait(driver, 10).until(
            EC.visibility_of_element_located((By.CLASS_NAME, 'selects'))
        )
        # Open the search-field dropdown and switch it to
        # "Publication/Source Titles".
        driver.find_element(By.CLASS_NAME, 'selects').click()
        driver.find_element(By.CSS_SELECTOR,
                            '#global-select > div.options-and-search > div > div[title="Publication/Source Titles"]').click()

    WebDriverWait(driver, 10).until(
        EC.visibility_of_element_located((By.ID, 'search-option'))
    )
    # Clear any previous query, then type the new one.
    search_option = driver.find_element(By.ID, 'search-option')
    search_option.clear()
    search_option.send_keys(keyword)
    # Press Escape to drop focus so the autocomplete overlay closes.
    webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()
    # Click the search button.
    driver.find_element(By.CSS_SELECTOR,
                        '#snSearchType > div.button-row > button.mat-focus-indicator.search.mat-flat-button.mat-button-base.mat-primary').click()
    # Detect the inline error banner (bad query / no results) and bail out.
    time.sleep(0.2)
    error_code = driver.find_elements(By.CSS_SELECTOR,
                                      '#snSearchType > div.search-error.error-code.light-red-bg.ng-star-inserted')
    if len(error_code) > 0:
        print("搜索失败")
        return

    # A Pendo onboarding popup sometimes covers the results; close it if
    # it appears, otherwise continue silently.
    try:
        WebDriverWait(driver, 5).until(
            EC.visibility_of_element_located((By.CSS_SELECTOR, '#pendo-base ._pendo-close-guide'))
        )
        driver.find_element(By.CSS_SELECTOR,
                            '#pendo-base ._pendo-close-guide').click()
    except Exception:
        # Popup absent -- nothing to close. (Was a bare `except:` that
        # printed a stray blank line.)
        pass
    click0()


def click0():
    """Wait for the search-results list to render, then click the title
    link of the first record to open its detail page."""
    first_result = (By.CSS_SELECTOR,
                    'app-base-summary-component app-records-list > '
                    'app-record app-summary-title a:nth-child(1)')
    WebDriverWait(driver, 10).until(
        EC.visibility_of_element_located(first_result)
    )
    driver.find_element(*first_result).click()


def get_qikan_data(type, dataSource, isQuote=False):
    """Walk WOS detail pages record-by-record, scrape each one and POST it
    to a local service; optionally recurse into each record's cited refs.

    Relies on module-level globals: ``driver`` (Selenium WebDriver, already
    positioned on a full-record page) plus ``data`` and ``old_url``.

    Args:
        type: Category tag stored into the payload (e.g. 10 for a
            directly-searched paper, '20' for a cited reference).
        dataSource: Origin label stored into the payload (journal name or
            the citing paper's accession number).
        isQuote: When True, open each paper's cited-reference list in a new
            tab and recursively scrape it too.
    """
    global data, old_url
    condition = True
    i = 0
    # The huge bound is effectively "loop until the Next button is
    # disabled or the page stops loading"; `i` is only a safety valve.
    while condition and i < 10000000000:
        i = i + 1
        # Wait for the pagination "next" button -- its visibility signals
        # that the detail page has finished loading.
        try:
            # Or wait until a specific element becomes visible
            WebDriverWait(driver, 10).until(
                EC.visibility_of_element_located((By.CSS_SELECTOR,
                                                  'app-page-controls .pagination > div > button:nth-child(4)'))
            )
        except Exception as e:
            print("等待超时，页面不存在该元素，也可能是页面加载失败")
            break
        time.sleep(1)

        url = driver.current_url

        # Parse the current page's HTML into a record dict.
        try:
            html = driver.page_source
            index, data = parse_html(html)
            # Tag the record with its category and origin.
            data['type'] = type
            data['dataSource'] = dataSource
            # Remember which URL the record came from.
            data['url'] = url
            # print(data)
            print("正在处理第", index, "篇论文")
            # Ask the local service whether this accession number is new;
            # save only when the lookup reports '0' (not present yet).
            res = requests.post('http://localhost:1728/qikan/journal/getByAccessionNo',
                                json={'accessionNo': data['accessionNo']})
            if res.json().get('data') == '0':
                print("保存")
                requests.post('http://localhost:1728/qikan/journal/save', json=data)
            else:
                print("该论文已存在")

        except Exception as e:
            print(driver.current_url)
            print("解析失败", e)

            # WOS sometimes bounces to a generic error page; try to
            # recover via the breadcrumb link and browser back.
            if driver.current_url.startswith('https://webofscience.clarivate.cn/wos/error/generic'):
                print("跳转错误，休眠 5秒")
                # if url.startswith('https://webofscience.clarivate.cn/wos/error/generic'):
                #     driver.get(old_url)
                # else:

                driver.find_element(By.CSS_SELECTOR,
                                    '#breadcrumb > ul > li:nth-child(20) > div > a').click()

                #     driver.get(url)
                driver.back()
                time.sleep(5)
                # driver.find_element(By.CSS_SELECTOR,
                #                     'app-page-controls .pagination > div > button:nth-child(4)').click()

        if isQuote:
            # Scrape the cited-reference list in a new tab.
            # Remember the current window handle so we can switch back.
            original_window = driver.current_window_handle
            try:

                WebDriverWait(driver, 3).until(
                    EC.visibility_of_element_located((By.ID,
                                                      'FRMiniCrlTa-viewCitedRefLink'))
                )
                linkBtn1 = driver.find_element(By.ID,
                                               'FRMiniCrlTa-viewCitedRefLink')
                # NOTE(review): lexicographic string comparison -- works for
                # plain digit strings without leading zeros ('12' > '0'),
                # but `int(text.replace(',', '')) > 0` would be the robust
                # check; confirm the counter never renders as e.g. '00'.
                if linkBtn1 is not None and linkBtn1.text.strip() > '0':
                    driver.execute_script("window.open('" + linkBtn1.get_attribute('href') + "');")
                    # Switch to the newly opened tab.
                    driver.switch_to.window(driver.window_handles[-1])
                    try:
                        click0()
                        get_qikan_data('20', data['accessionNo'])
                    except Exception as e:
                        print("引用跳转，抓数据异常")
                    # Close the current (newly opened) tab.
                    driver.close()
                    # Switch back to the original tab.
                    driver.switch_to.window(original_window)
            except Exception as e:
                print("没有找到引用按钮")

                if driver.current_url.startswith('https://webofscience.clarivate.cn/wos/error/generic'):
                    print("跳转错误，休眠 5秒")
            # row_index = f'Row_{index}'
            # if row_index in df.index:
            #     df.loc[row_index] = pd.Series(data, name=row_index)  # overwrite the row if the index exists
            # else:
            #     df = df.append(pd.Series(data, name=row_index))  # otherwise append a new row
            # df.to_csv(file_path, index=True)  # save as CSV, keeping the row index as the first column

        old_url = driver.current_url
        try:
            # Advance to the next record unless the Next button is
            # disabled (i.e. we are on the last record).
            nextBtn = driver.find_element(By.CSS_SELECTOR,
                                          'app-page-controls .pagination > div > button:nth-child(4)')
            if 'mat-button-disabled' not in nextBtn.get_attribute('class'):
                # Click through to the next page.
                nextBtn.click()

                if driver.current_url.startswith('https://webofscience.clarivate.cn/wos/error/generic'):
                    print("跳转错误，休眠 5秒")

            # input("请手动操作至论文详情页面,完成后按Enter键继续...")
            else:
                condition = False
        except Exception as e:
            print(old_url)
            print("An error occurred:", e)

            # On the generic error page: reload the last good URL, retry
            # the "next" click once, then give up on this loop.
            if driver.current_url.startswith('https://webofscience.clarivate.cn/wos/error/generic'):
                print("跳转错误，休眠 5秒")
                driver.get(url)
                time.sleep(5)
                driver.find_element(By.CSS_SELECTOR,
                                    'app-page-controls .pagination > div > button:nth-child(4)').click()
                break


if __name__ == "__main__":
    # Entry point: log in via the scihuber WOS proxy, then for each
    # journal in `arr` run a publication search and scrape every record.
    # 0000391627
    # adolescent depression 1: https://webofscience-clarivate-cn-s.era.lib.swjtu.edu.cn/wos/alldb/full-record/WOS:000653016400005
    url_root = 'http://www.scihuber.com/e/member/login/'
    wait_time = 5

    # Chrome options: skip images/plugins/extensions to speed up page
    # loads, and start maximized so lazily-rendered elements are visible.
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument("--disable-images")
    chrome_options.add_argument("--disable-plugins")
    chrome_options.add_argument("--disable-extensions")
    chrome_options.add_argument("--start-maximized")

    # Chromedriver path is configurable via CHROMEDRIVER_PATH; falls back
    # to the original hard-coded location for backward compatibility.
    driver_path = os.environ.get(
        'CHROMEDRIVER_PATH',
        r'C:\\Users\\Administrator\\Downloads\\chromedriver-win64\\chromedriver.exe')
    driver = webdriver.Chrome(options=chrome_options, service=Service(driver_path))

    driver.get(url_root)  # open the proxy login page

    # SECURITY: credentials were hard-coded in source. Prefer the
    # WOS_USERNAME / WOS_PASSWORD environment variables; the original
    # values remain only as a backward-compatible fallback and should be
    # rotated and removed.
    driver.find_element(By.ID, 'username').send_keys(os.environ.get('WOS_USERNAME', '058397'))
    driver.find_element(By.ID, 'password').send_keys(os.environ.get('WOS_PASSWORD', '022209832'))
    driver.find_element(By.CSS_SELECTOR, '#maincolumn .lBtn').click()
    time.sleep(3)
    driver.get('http://www.scihuber.com/e/action/ShowInfo.php?classid=186&id=2699')  # WOS entry page

    # The proxy opens WOS in a new tab; switch to the newest handle.
    driver.switch_to.window(driver.window_handles[-1])

    # Journals to scrape. Optional 'skip' jumps to that record number
    # before scraping starts (resume support).
    arr = [{'name': 'CHILD MALTREATMENT', 'skip': None}]
    # arr = [{'name': 'Trauma Violence & Abuse', 'skip': 615}, {'name': 'CHILD MALTREATMENT'}]
    init = True
    for item in arr:
        search(item['name'], init)
        skip = item.get('skip')  # .get() tolerates entries without a 'skip' key
        if skip is not None:
            # Jump straight to the requested record number.
            nextPage = driver.find_element(By.CSS_SELECTOR, '#snNextPageTop')
            nextPage.clear()
            nextPage.send_keys(skip)
            nextPage.send_keys(Keys.RETURN)  # press Enter to submit
        get_qikan_data(10, item['name'], False)
        init = False  # cookie banner / field selector only exist on the first search
    # driver.quit()  # left disabled so the session stays open for inspection