"""
File: WOS_spider.py
Author: Dramwig
Email: dramwig@outlook.com
Date: 2024-02-27
Version: 1.6

Description: This script uses Selenium and BeautifulSoup to scrape detailed paper information from Web of Science (WOS) website.
It navigates through each paper's detail page, extracts key information such as title, citation count, country, journal, etc.,
and saves the collected data into a CSV file.

Please note that this script is intended for educational purposes only, and you should abide by the terms of service and usage policies
of the Web of Science when using it or any derivative work.

"""
import os

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
import time, keyboard

import requests
import threading
import json

from config import webdriverPath, hostUrl


def parse_html(html):
    """Parse a WOS paper-detail page and extract its metadata.

    Args:
        html: Full page source (string) of one paper detail page.

    Returns:
        (index, data_dict): ``index`` is the current record number shown in
        the pagination input box (-1 when it could not be read), and
        ``data_dict`` holds the extracted fields (accessionNo, keywords,
        documentType, source, date, author, citation, country, ...).

    Raises:
        ValueError: when the accession number cannot be found — the page is
        considered unusable without it.
    """
    soup = BeautifulSoup(html, 'html.parser')

    data_dict = {}

    # The accession number is mandatory: abort parsing when it is missing.
    try:
        class_title = soup.find(id="HiddenSecTa-accessionNo")
        data_dict['accessionNo'] = class_title.text.strip()
    except Exception:
        print("获取ID失败")
        raise ValueError("获取ID失败")

    # Label/value grid containers hold most of the metadata fields.
    try:
        containers = soup.find_all('div', class_='cdx-two-column-grid-container')
        infoData = {}
        for container in containers:
            labels = container.find_all(class_='cdx-grid-label')
            datas = container.find_all(class_='cdx-grid-data')
            if not datas:
                # Some sections use an alternative value class.
                datas = container.find_all(class_='section-label-data')
            label = labels[0].text.strip()
            # Join all value texts of this section, one per line.
            text = '\n'.join(data.text.strip() for data in datas)
            infoData[label] = text

        # Promote well-known labels to top-level fields, removing them from
        # infoData so the leftover dict carries only the uncommon fields.
        data_dict['keywords'] = infoData.pop('Keywords', '')
        data_dict['documentType'] = infoData.pop('Document Type', '')
        data_dict['source'] = infoData.pop('Source', '')
        data_dict['date'] = infoData.pop('Indexed', '')
        data_dict['author'] = infoData.pop('By', '')
        data_dict['authorInfo'] = infoData.pop('Author Information', '')
        data_dict['classification'] = infoData.pop('Categories/ Classification', '')
        data_dict['abstractInfo'] = infoData.pop('Abstract', '')
        data_dict['infoData'] = infoData

    except Exception:
        print("解析容器失败")

    # Paper title.
    try:
        class_title = soup.find(class_="title text--large cdx-title")
        data_dict['documentName'] = class_title.text.strip()
    except Exception:
        print("获取标题失败")

    # Citation count; default to '0' when the element is absent.
    try:
        class_citation = soup.find(class_="mat-tooltip-trigger medium-link-number link ng-star-inserted")
        data_dict['citation'] = class_citation.text.strip()
    except Exception:
        data_dict['citation'] = '0'

    # Author address: the country is the last comma-separated component.
    try:
        class_addresses = soup.find('span', class_='ng-star-inserted', id='FRAOrgTa-RepAddressFull-0')
        data_dict['country'] = class_addresses.text.split(',')[-1].strip()
    except Exception:
        # Fallback selector used on some page layouts.
        try:
            class_addresses = soup.find('span', class_='value padding-right-5--reversible')
            data_dict['country'] = class_addresses.text.split(',')[-1].strip()
        except Exception:
            print("获取国家失败")

    # BUG FIX: `index` used to be assigned only inside the try below, so a
    # failed lookup raised UnboundLocalError at the return statement.
    # Initialize to -1 so callers get a sentinel instead of a crash.
    index = -1
    try:
        input_box = soup.find(class_='wos-input-underline page-box', id="snNextPageTop")
        index = int(input_box['aria-label'].split()[-1].replace(",", ""))
    except Exception:
        print("获取页码失败")

    return index, data_dict


# pip install
# import PyExecJS
class WOS_spider:
    """Selenium-driven scraper for Web of Science search results and citations.

    Workflow: ``run()`` starts the browser and logs in through the access
    proxy, ``search()`` submits the keyword, ``getList()`` collects the
    result list, ``getCitationList()`` walks the citing papers of each
    result, and ``get_qikan_data*()`` post parsed detail pages to the
    backend service.
    """

    def __init__(self, keyword, i):
        self.keyword = keyword          # publication/source title to search for
        self.i = i                      # resume index within the result list
        self.listEnd = False            # set True once the result list is complete
        self.list = []                  # collected search-result records
        self.citationList = []          # collected citing-paper records

    def run(self):
        """Launch Chrome, log in to the access proxy, and open the WOS gateway page."""
        chrome_options = webdriver.ChromeOptions()
        # Disable images/plugins/extensions to speed up page loads.
        chrome_options.add_argument("--disable-images")
        chrome_options.add_argument("--disable-plugins")
        chrome_options.add_argument("--disable-extensions")
        chrome_options.add_argument("--start-maximized")

        self.driver = webdriver.Chrome(options=chrome_options, service=Service(webdriverPath))
        self.driver.get('http://www.scihuber.com/e/member/login/')

        # SECURITY NOTE(review): hard-coded credentials — move them into config.
        self.driver.find_element(By.ID, 'username').send_keys('056958')
        self.driver.find_element(By.ID, 'password').send_keys('13653800549')
        self.driver.find_element(By.CSS_SELECTOR, '#maincolumn .lBtn').click()
        time.sleep(5)
        self.driver.get('http://www.scihuber.com/e/action/ShowInfo.php?classid=186&id=3324')
        time.sleep(5)

    def closeCookie(self):
        """Dismiss the cookie-consent banner if it appears."""
        try:
            WebDriverWait(self.driver, 10).until(
                EC.visibility_of_element_located((By.CSS_SELECTOR, '#onetrust-close-btn-container > button'))
            )
            time.sleep(5)
            self.driver.find_element(By.CSS_SELECTOR, '#onetrust-close-btn-container > button').click()

            WebDriverWait(self.driver, 10).until(
                EC.visibility_of_element_located((By.CLASS_NAME, 'selects'))
            )
        except Exception:
            print('not cookie')

    def search(self):
        """Open basic search, select 'Publication/Source Titles', submit the keyword."""
        self.driver.get('https://webofscience.clarivate.cn/wos/alldb/basic-search')
        self.driver.find_element(By.CLASS_NAME, 'selects').click()
        # Switch the search field to Publication/Source Titles.
        self.driver.find_element(By.CSS_SELECTOR,
                                 '#global-select > div.options-and-search > div > div[title="Publication/Source Titles"]').click()

        WebDriverWait(self.driver, 10).until(
            EC.visibility_of_element_located((By.ID, 'search-option'))
        )
        # Clear any previous query before typing the keyword.
        search_option = self.driver.find_element(By.ID, 'search-option')
        search_option.clear()
        search_option.send_keys(self.keyword)
        # Drop focus so the suggestion dropdown closes.
        webdriver.ActionChains(self.driver).send_keys(Keys.ESCAPE).perform()
        self.driver.find_element(By.CSS_SELECTOR,
                                 '#snSearchType > div.button-row > button.mat-focus-indicator.search.mat-flat-button.mat-button-base.mat-primary').click()
        # Bail out early if the page reports a search error.
        time.sleep(0.2)
        error_code = self.driver.find_elements(By.CSS_SELECTOR,
                                               '#snSearchType > div.search-error.error-code.light-red-bg.ng-star-inserted')
        if len(error_code) > 0:
            print("搜索失败")
            return

        # Close the onboarding guide overlay when it shows up.
        try:
            WebDriverWait(self.driver, 5).until(
                EC.visibility_of_element_located((By.CSS_SELECTOR, '#pendo-base ._pendo-close-guide'))
            )
            self.driver.find_element(By.CSS_SELECTOR,
                                     '#pendo-base ._pendo-close-guide').click()
        except Exception:
            print("")

    def scroll(self):
        """Scroll down in 500px steps so lazy-loaded results render."""
        scrollHeight = self.driver.execute_script("return document.body.scrollHeight")
        scroll = 500
        while scroll < scrollHeight + 5000:
            self.driver.execute_script("window.scrollTo(0, " + str(scroll) + ");")
            scroll = scroll + 500
            time.sleep(0.4)

    def getList(self):
        """Walk every result page and append one record per paper to self.list."""
        condition = True
        i = 0
        while condition:
            i = i + 1
            print(self.keyword + "。第" + str(i) + "页")
            self.scroll()
            elements = self.driver.find_elements(By.CSS_SELECTOR, ".summary-record-parent")
            for element in elements:
                try:
                    title = element.find_element(By.CSS_SELECTOR, '.title')
                    if title is None:
                        continue
                except Exception:
                    print('无标题')
                    continue
                infoData = {}
                item = {
                    'title': title.text,
                    'url': title.get_attribute('href'),
                    'type': '10',
                    'dataSource': self.keyword,
                    'infoData': infoData,
                }
                # Collect any full-text/related links shown on the record.
                try:
                    links = element.find_elements(By.CSS_SELECTOR, 'app-summary-record-links a')
                    if links is not None:
                        for link in links:
                            infoData[link.text] = link.get_attribute('href')
                except Exception:
                    print('无链接')

                try:
                    citations = element.find_element(By.CSS_SELECTOR, '.citations a')
                    # BUG FIX: a trailing comma here previously stored a 1-tuple
                    # instead of the citation-count string.
                    item['citation'] = citations.get_attribute('data-stat-value')
                    item['citationurl'] = citations.get_attribute('href')
                except Exception:
                    # NOTE(review): key 'citation1' differs from getCitationList's
                    # 'citation' — kept as-is in case downstream consumers rely on it.
                    item['citation1'] = '无'

                self.list.append(item)

            # Advance until the next-page button is disabled.
            nextBtn = self.driver.find_element(By.CSS_SELECTOR, '[data-ta="next-page-button"]')
            if 'mat-button-disabled' not in nextBtn.get_attribute('class'):
                nextBtn.click()
            else:
                condition = False

    def getCitationList(self):
        """For each record in self.list, collect its citing papers into self.citationList."""
        for item in self.list:
            i = self.list.index(item)
            if i < self.i:
                continue  # resume support: skip already-processed entries
            if i != 0 and i % 100 == 0:
                # Restart the browser periodically to avoid session decay.
                print("100次重新启动")
                self.driver.quit()
                self.run()
                self.closeCookie()
                time.sleep(10)
            self.i = i
            print(self.keyword + "。" + str(len(self.list)) + "列表 第" + str(self.i) + "页")
            if 'citationurl' not in item:
                continue
            self.driver.get(item['citationurl'])
            self.driver.switch_to.window(self.driver.window_handles[-1])
            # BUG FIX: capture the source paper's url once. The inner loop used
            # to rebind `item`, so from the second citing record onward
            # 'dataSource' pointed at the previous record instead of the source.
            source_url = item['url']

            condition = True
            while condition:
                self.scroll()
                elements = self.driver.find_elements(By.CSS_SELECTOR, ".summary-record-parent")
                for element in elements:
                    try:
                        title = element.find_element(By.CSS_SELECTOR, '.title')
                        if title is None:
                            continue
                    except Exception:
                        print('无标题')
                        continue
                    infoData = {}
                    record = {
                        'title': title.text,
                        'url': title.get_attribute('href'),
                        'type': '20',
                        'dataSource': source_url,
                        'infoData': infoData,
                    }
                    try:
                        links = element.find_elements(By.CSS_SELECTOR, 'app-summary-record-links a')
                        if links is not None:
                            for link in links:
                                infoData[link.text] = link.get_attribute('href')
                    except Exception:
                        print('无链接')

                    try:
                        citations = element.find_element(By.CSS_SELECTOR, '.citations a')
                        # BUG FIX: trailing comma previously stored a 1-tuple here.
                        record['citation'] = citations.get_attribute('data-stat-value')
                        record['citationurl'] = citations.get_attribute('href')
                    except Exception:
                        record['citation'] = '无引用'

                    self.citationList.append(record)
                try:
                    # Advance until the next-page button is disabled or missing.
                    nextBtn = self.driver.find_element(By.CSS_SELECTOR, '[data-ta="next-page-button"]')
                    if 'mat-button-disabled' not in nextBtn.get_attribute('class'):
                        nextBtn.click()
                    else:
                        condition = False
                except Exception:
                    print('无下一页')
                    condition = False

    def click0(self):
        """Open the first search result; jump to record self.i when resuming."""
        WebDriverWait(self.driver, 10).until(
            EC.visibility_of_element_located((By.CSS_SELECTOR,
                                              'app-base-summary-component app-records-list > app-record app-summary-title a:nth-child(1)'))
        )
        self.driver.find_element(By.CSS_SELECTOR,
                                 'app-base-summary-component app-records-list > app-record app-summary-title a:nth-child(1)').click()

        if self.i > 0:
            time.sleep(5)
            nextPage = self.driver.find_element(By.CSS_SELECTOR, '#snNextPageTop')
            nextPage.clear()
            nextPage.send_keys(self.i)
            nextPage.send_keys(Keys.RETURN)  # simulate pressing Enter
        else:
            self.i = 0

    def get_qikan_data1(self, list):
        """Visit each record's detail page and POST the parsed data to the backend."""
        for item in list:
            # NOTE(review): the index is looked up in self.list, not the `list`
            # argument — the two are assumed to be the same object; verify callers.
            i = self.list.index(item)
            if i < self.i:
                continue
            self.i = i
            # BUG FIX: this line used the module-global `a` (a.list / a.i)
            # instead of self, which raises NameError inside worker threads.
            print(self.keyword + "。" + str(len(self.list)) + "列表 第" + str(self.i) + "页")
            if 'url' not in item:
                continue
            self.driver.get(item['url'])
            self.driver.switch_to.window(self.driver.window_handles[-1])
            # Wait for the pagination controls — i.e. the detail page is rendered.
            WebDriverWait(self.driver, 10).until(
                EC.visibility_of_element_located((By.CSS_SELECTOR,
                                                  'app-page-controls .pagination > div > button:nth-child(4)'))
            )
            time.sleep(1)

            html = self.driver.page_source
            index, data = parse_html(html)
            data['type'] = item['type']
            data['dataSource'] = item['dataSource']
            data['url'] = item['url']
            print(self.keyword, "：正在处理第", index, "篇论文")
            requests.post(hostUrl + '/journal/save', json=data)

    def get_qikan_data(self):
        """Page through detail views, parsing and saving each paper until the last page."""
        condition = True
        while condition and self.i < 10000000000:
            WebDriverWait(self.driver, 10).until(
                EC.visibility_of_element_located((By.CSS_SELECTOR,
                                                  'app-page-controls .pagination > div > button:nth-child(4)'))
            )
            time.sleep(1)

            url = self.driver.current_url

            html = self.driver.page_source
            index, data = parse_html(html)
            data['type'] = '10'
            data['dataSource'] = self.keyword
            data['url'] = url
            print(self.keyword, "：正在处理第", index, "篇论文")
            # Skip papers the backend already knows by accession number.
            res = requests.post('http://localhost:1728/qikan/journal/getByAccessionNo',
                                json={'accessionNo': data['accessionNo']})
            if res.json().get('data') == '0':
                print(self.keyword, "保存")
                requests.post('http://localhost:1728/qikan/journal/save', json=data)
            else:
                print(self.keyword, "该论文已存在")

            old_url = self.driver.current_url
            # Click the next-page control unless it is disabled.
            nextBtn = self.driver.find_element(By.CSS_SELECTOR,
                                               'app-page-controls .pagination > div > button:nth-child(4)')
            if 'mat-button-disabled' not in nextBtn.get_attribute('class'):
                nextBtn.click()
            else:
                condition = False

            self.i = self.i + 1


def exec_Wos(item):
    """Run the full scrape pipeline for one task dict, retrying on failure.

    Args:
        item: task dict with 'keyword' (search term) and 'i' (resume index).
            On success 'i' is set to -1 and 'size' records the final index;
            after more than 5 failures 'errorNum'/'errorMsg' are recorded
            and retrying stops.

    Side effects: reads/writes ./data/<keyword>_list.json and
    _citationList.json, and checkpoints the module-global ``arr`` to data.json.
    """
    isError = True
    i = item['i']
    errorNum = 0
    while isError:
        isError = False
        a = None
        try:
            a = WOS_spider(item['keyword'], i)
            a.run()
            a.closeCookie()

            keywordStr = './data/' + item['keyword']
            # Reuse a cached result list when present; otherwise search and cache it.
            if os.path.exists(keywordStr + "_list.json"):
                with open(keywordStr + "_list.json", 'r') as file:
                    a.list = json.load(file)
            else:
                a.search()
                a.getList()
                with open(keywordStr + "_list.json", "w", encoding="utf-8") as file:
                    json.dump(a.list, file)
                # Mark the list as complete and restart the resume index.
                a.listEnd = True
                a.i = 0
            # Resume from a cached citation list when present, then collect
            # (the original ran getCitationList on both branches identically).
            if os.path.exists(keywordStr + "_citationList.json"):
                with open(keywordStr + "_citationList.json", 'r') as file:
                    a.citationList = json.load(file)
            a.getCitationList()
            with open(keywordStr + "_citationList.json", "w", encoding="utf-8") as file:
                json.dump(a.citationList, file)

            a.driver.quit()
            item['size'] = a.i
            item['i'] = -1  # -1 marks the task as finished
        except Exception as e:
            print(e)
            print(item, ":: 第", i, "页出错")
            isError = True
            # BUG FIX: `a` may be None (or its driver never created) when setup
            # failed; guard so a secondary AttributeError doesn't mask the error.
            if a is not None:
                try:
                    a.driver.quit()
                except Exception:
                    pass
                item['i'] = a.i
            errorNum = errorNum + 1
            if errorNum > 5:
                # BUG FIX: previously this recorded the error but kept retrying
                # forever; now it records the failure and stops the loop.
                item['errorNum'] = errorNum
                item['errorMsg'] = str(e)
                isError = False
            # Checkpoint progress so a crash doesn't lose completed tasks.
            with open("data.json", "w") as file:
                json.dump(arr, file)  # NOTE(review): relies on module-global `arr`


def arrThread(subArr):
    """Worker loop: run every unfinished task in subArr, checkpointing after each."""
    for task in subArr:
        if task['i'] == -1:
            # -1 means this task already completed; skip it.
            continue
        exec_Wos(task)
        # Persist the shared task array after each task so progress survives crashes.
        with open("data.json", "w") as file:
            json.dump(arr, file)


def split_list(lst, num_partitions):
    """Yield num_partitions contiguous slices of lst with near-equal sizes.

    The first ``len(lst) % num_partitions`` slices get one extra element.
    """
    base, extra = divmod(len(lst), num_partitions)
    for part in range(num_partitions):
        start = part * base + min(part, extra)
        stop = start + base + (1 if part < extra else 0)
        yield lst[start:stop]


# test 读取搜索列表、引用列表 、读取列表后再去 读取明细
if __name__ == "__main__":
    # Load the task array: one dict per keyword, with 'i' as the resume index
    # (-1 once a task has completed).
    with open("data.json", 'r') as file:
        arr = json.load(file)

    # Keep only the tasks that still need processing.
    pending = [task for task in arr if task['i'] != -1]

    # Write the task array back out (checkpoint before starting).
    with open("data.json", "w") as file:
        json.dump(arr, file)

    # Fan the pending tasks out over four worker threads, staggering each
    # startup by 20s so the browser sessions don't launch simultaneously.
    threads = list()
    for chunk in split_list(pending, 4):
        worker = threading.Thread(target=arrThread, args=(chunk,))
        threads.append(worker)
        worker.start()
        time.sleep(20)
