from typing import List
from urllib.parse import quote
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
from config import webdriverPath, hostUrl, download_default_directory
import time, keyboard


def get_page(item, host, Cookie):
    """Search CNKI ("知网") for *item* by subject + author and return its row.

    Posts an advanced-search form to ``{host}/kns8s/brief/grid`` with the
    given ``Cookie`` header string, parses the HTML result grid, and returns
    the row dict whose accession number equals ``item['accessionNo']``.

    :param item: task dict; reads ``documentName``, ``author``, ``accessionNo``.
    :param host: scheme+host prefix, e.g. ``https://kns.cnki.net``.
    :param Cookie: raw ``Cookie`` header value for the session.
    :returns: matching row dict, or ``None`` when no row matches.
    :raises RuntimeError: when the response body signals "page missing",
        a captcha/timeout check, or an exhausted daily download quota —
        callers match on these message texts.
    """
    ky = item['documentName']
    author = item['author']
    # Raw (unencoded) form body; QueryJson mirrors the site's own
    # advanced-search payload (subject = ky, author = exact match).
    req = (
            'boolSearch=true&QueryJson={"Platform":"","Resource":"CROSSDB","Classid":"WD0FTY92","Products":"","QNode":{"QGroup":[{"Key":"Subject","Title":"","Logic":0,"Items":[],"ChildItems":[{"Key":"input[data-tipid=gradetxt-1]","Title":"主题","Logic":0,"Items":['
            '{"Key":"input[data-tipid=gradetxt-1]","Title":"主题","Logic":0,"Field":"SU","Operator":"TOPRANK","Value":"' + ky + '","Value2":""}],"ChildItems":[]},'
            '{"Key":"input[data-tipid=gradetxt-2]","Title":"作者","Logic":0,"Items":[{"Key":"input[data-tipid=gradetxt-2]","Title":"作者","Logic":0,"Field":"AU","Operator":"DEFAULT","Value":"' + author + '","Value2":""}],"ChildItems":[]}]},'
            '{"Key":"ControlGroup","Title":"","Logic":0,"Items":[],"ChildItems":[]}]},"ExScope":"1","SearchType":1,"Rlang":"CHINESE","KuaKuCode":"YSTT4HG0,LSTPFY1C,JUP3MUPD,MPMFIG1A,EMRPGLPA,WQ0UVIAA,BLZOG7CK,PWFIRAGL,NN3FJMUV,NLBO1Z6R","SearchFrom":1}'
            '&pageNum=1&pageSize=20&sortField=ZH&sortType=&dstyle=listmode&boolSortSearch=false&sentenceSearch=false&productStr=YSTT4HG0,LSTPFY1C,RMJLXHZ3,JQIRZIYA,JUP3MUPD,1UR4K4HZ,BPBAFJ5S,R79MZMCB,MPMFIG1A,EMRPGLPA,J708GVCE,ML4DRIDX,WQ0UVIAA,NB3BWEHK,XVLO76FD,HR1YT1Z9,BLZOG7CK,PWFIRAGL,NN3FJMUV,NLBO1Z6R,'
            '&aside=（主题：' + ky + '）AND（作者：' + author + '(精确)）&searchFrom=资源范围：总库;++中英文扩展;++时间范围：更新时间：不限;++&CurPage=1'
    )
    # Percent-encode only the value part of each "key=value" pair.
    # NOTE: the original called quote(s2, "utf-8"), which sets quote()'s
    # *safe* parameter (not an encoding) to "utf-8" — equivalent to
    # safe='' for these payloads; written explicitly here.
    datastr = '&'.join(
        part[:part.find('=')] + '=' + quote(part[part.find('=') + 1:], safe='')
        for part in req.split('&')
    )
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        # Content-Length is intentionally NOT set: the hardcoded "1488" was
        # wrong for a variable-length body, and requests computes it itself.
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Cookie": Cookie,
        "Host": host[host.find('//') + 2:],
        "Origin": host,
        "Pragma": "no-cache",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "'macOS'",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "proxy-connection": "keep-alive",
        "Referer": host + "/kns8s/defaultresult/index",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest"
    }
    response = requests.post(host + "/kns8s/brief/grid", headers=headers, data=datastr)

    unicode_string = response.content.decode('utf-8')
    print(unicode_string)

    # The site reports error states inside the HTML body; surface them as
    # exceptions — the caller matches on the message text.  Explicit raises
    # (not assert) so the checks survive `python -O`.
    if unicode_string == '<script>setTimeout(ddata,100);</script>':
        raise RuntimeError('setTimeout 垃圾网站 页面不存在或者已被删除')
    for marker, message in (
        ('您今天的下载次数已用尽', '您今天的下载次数已用尽'),
        ('页面不存在', '页面不存在'),
        ('请输入验证码', '知网节超时验证'),
        ('知网节超时验证', '知网节超时验证'),
    ):
        if marker in unicode_string:
            raise RuntimeError(message)

    soup = BeautifulSoup(unicode_string, 'html.parser')

    # One <tr> per search hit in the result grid.
    rows = soup.select("#gridTable  table > tbody > tr")
    for tr in rows:
        try:
            name = tr.select('td.name a')[0]
            d = {
                'accessionNo': tr.select('td.operat > a.icon-collect')[0]['data-filename'],
                'type': 10,
                'sourceWebsite': '知网',
                'language': 'zh',
                'datasource': '社会工作',
                'documentName': name.text.strip(),
                'url': name['href'],
                'author': tr.select('td.author')[0].text.strip(),
                'source': tr.select('td.source')[0].text.strip(),
                'database': tr.select('td.data')[0].text.strip(),
                'date': tr.select('td.date')[0].text.strip(),
                'infoData': {
                    'download_url': tr.select('td.operat a')[0]['href'],
                },
            }
            # Only the exact requested record counts as a hit.
            if item['accessionNo'] == d['accessionNo']:
                return d
        except Exception as e:
            # Rows missing the expected cells (notices, ads) are skipped.
            print(e)
    return None


class WOS_spider:
    """Drives a Chrome session that logs into scihuber.com to obtain CNKI cookies."""

    i = 0        # resume counter (currQuantity of the bound task item)
    item = None  # task item currently bound to this spider

    def set(self, item):
        """Bind a task *item* and reset per-run accumulators.

        Reads ``item['currQuantity']`` as the resume counter.
        """
        self.i = item['currQuantity']
        self.list = []
        self.item = item
        self.citationList = []

    def run(self):
        """Start Chrome, log in to the proxy site and open a detail page.

        Side effect: sets ``self.driver`` to the live WebDriver session.
        """
        chrome_options = webdriver.ChromeOptions()
        # Skip images/plugins/extensions to speed up page loads.
        chrome_options.add_argument("--disable-images")
        chrome_options.add_argument("--disable-plugins")
        chrome_options.add_argument("--disable-extensions")
        # Open the window maximized.
        chrome_options.add_argument("--start-maximized")
        chrome_options.add_experimental_option(
            "prefs", {"download.default_directory": download_default_directory}
        )
        # Create the WebDriver exactly once — the original constructed two
        # Chrome instances back to back and leaked the first one.
        self.driver = webdriver.Chrome(options=chrome_options, service=Service(webdriverPath))
        self.driver.get('http://www.scihuber.com/e/member/login/')

        # Log in.
        # SECURITY NOTE(review): credentials are hard-coded here; they should
        # move into config alongside webdriverPath.
        self.driver.find_element(By.ID, 'username').send_keys('413624')
        self.driver.find_element(By.ID, 'password').send_keys('945103480729')
        self.driver.find_element(By.CSS_SELECTOR, '#maincolumn .lBtn').click()
        time.sleep(0.5)
        # Open a known article detail page so the session cookies are issued.
        self.driver.get('http://www.scihuber.com/e/action/ShowInfo.php?classid=1&id=3084')
        time.sleep(4)

    def getCookie(self):
        """Serialize session cookies to ``"name=value; "`` form.

        Persists the string to ``cookieStr.txt`` for debugging/manual reuse,
        prints it, and returns it.
        """
        cookies: List[dict] = self.driver.get_cookies()
        cookieStr = ''.join(
            cookie['name'] + '=' + cookie['value'] + '; ' for cookie in cookies
        )
        with open("cookieStr.txt", "w") as file:
            file.write(cookieStr)
        print(cookieStr)
        return cookieStr


def exec():
    """Poll the backend task queue and download one CNKI document per task.

    Starts a Selenium session via WOS_spider to obtain cookies, then loops:
    fetch a pending item from the backend, locate it on CNKI with
    ``get_page``, open its abstract page (which triggers the file download),
    and report completion back.  Returns when the backend has no more data
    or the daily download quota is exhausted.

    NOTE(review): the name shadows the builtin ``exec``; kept because the
    module-level retry loops call it by this name.
    """
    ws = WOS_spider()
    ws.run()
    Cookie = ws.getCookie()
    time.sleep(1)
    # Derive "scheme://host" from the current URL (find("/", 8) skips "https://").
    host = ws.driver.current_url[0:ws.driver.current_url.find("/", 8)]
    while True:
        res = requests.get(hostUrl + '/literature/file/geLiteratureFile')
        payload = res.json()  # parse the JSON body once, not three times
        if not payload['success']:
            print('没有数据')
            break
        try:
            item = payload['data']
            data = get_page(item, host, Cookie)
            if data is None:
                # Not found on CNKI — ask the backend for the next task.
                continue
            time.sleep(1)
            url = host + data['url']
            # Opening the abstract page triggers the PDF download.
            ws.driver.get(url)
            requests.post(hostUrl + '/literature/file/wosSave', json=item)
            time.sleep(1)
        except Exception as e:
            print('错误', payload['data'])
            print(e)
            # Quota / missing-page markers arrive in the exception message
            # raised by get_page; both mean the daily quota is gone.
            if str(e).find('页面不存在') > -1 or str(e).find('下载次数已用尽') > -1:
                print('下载量已满')
                ws.driver.quit()
                return
            # The browser page itself may also show the quota banner.
            if ws.driver.find_element(By.CSS_SELECTOR, 'body').text.find('下载量已满') > -1:
                print('下载量已满')
                ws.driver.quit()
                return


if __name__ == '__main__':
    # Keep restarting the crawler until exec() returns without raising
    # (e.g. after a browser crash or a transient network failure).
    while True:
        try:
            exec()
        except Exception as err:
            print(err)
        else:
            break

def exec3():
    """Retry exec() forever until one run completes without an exception."""
    while True:
        try:
            exec()
        except Exception as err:
            print(err)
        else:
            break