# CNKI/Web-of-Science scraper: fetches article detail pages, parses metadata,
# and posts the results to a local journal-save service.
import requests as requests

from bs4 import BeautifulSoup
import json
import traceback

from cnki.getCookie import getCookie


# print(d)
def getItemData(item, cookieStr):
    """Fetch one article's detail page, parse its metadata, and save it.

    Downloads ``item['url']`` with a browser-like header set, extracts every
    "key：value" row from the page, and merges the results into *item* before
    POSTing it to the local save endpoint.

    Args:
        item: dict describing the article. Must contain 'url'; 'title' is
            used for logging and error dumps; 'infoData' (dict) is created
            if missing. Gains 'abstractInfo' and 'keywords' keys.
        cookieStr: raw Cookie header value for the authenticated session.
    """
    # res = requests.post('http://localhost:1728/qikan/journal/getByAccessionNo',
    #                     json={'accessionNo': item['accessionNo']})
    # if res.json().get('data') == '0':
    headers1 = {

        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Cookie": cookieStr,
        "Host": "kns.cnki.net",
        "Origin": "https://kns.cnki.net",
        "Pragma": "no-cache",
        "Referer": "https://kns.cnki.net/kns8s/defaultresult/index?crossids=YSTT4HG0%2CLSTPFY1C%2CJUP3MUPD%2CMPMFIG1A%2CWQ0UVIAA%2CBLZOG7CK%2CPWFIRAGL%2CEMRPGLPA%2CNLBO1Z6R%2CNN3FJMUV&korder=SU&kw=%E7%A4%BE%E4%BC%9A%E5%B7%A5%E4%BD%9C",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "'macOS'",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
    }
    item_content = ''
    try:
        # timeout prevents the crawler from hanging forever on a dead connection.
        response = requests.get(item['url'], headers=headers1, timeout=30)
        item_content = response.content.decode('utf-8')
        # Was an `assert`, which is silently stripped under `python -O`;
        # raise explicitly so the anti-bot page is always detected.
        if item_content.rfind('知网节超时验证') != -1:
            raise RuntimeError('知网节超时验证')

        soup = BeautifulSoup(item_content, 'html.parser')

        # Collect "key：value" metadata rows (full-width colon separator).
        d = {}
        for row in soup.select('.row'):
            t = row.text.strip()
            index = t.find('：')
            if index != -1:
                key = t[:index]
                value = t[index + 1:]
                d[key] = value

        # Prefer the body snapshot (正文快照) over the abstract (摘要);
        # both keys are removed so neither leaks into infoData.
        snapshot = d.pop('正文快照', None)
        abstract = d.pop('摘要', None)
        item['abstractInfo'] = snapshot if snapshot is not None else abstract
        item['keywords'] = d.pop('关键词', None)
        # setdefault avoids a secondary KeyError when the caller did not
        # pre-populate 'infoData'.
        item.setdefault('infoData', {}).update(d)

    except Exception as e:
        print(e)
        traceback.print_exc()

        # Dump the raw HTML for post-mortem analysis; sanitize the exception
        # text so a '/' in the message cannot break the file path.
        safe_err = str(e).replace('/', '_')
        dump_name = "item/" + item.get('title', 'unknown') + "_" + safe_err + ".txt"
        with open(dump_name, "w", encoding="utf-8") as file:
            file.write(item_content)

    print(item.get('title'), "保存")
    requests.post('http://localhost:1728/qikan/journal/save', json=item, timeout=30)
    # else:
    #     print(item['title'], "该论文已存在")


def getList(cookie_str=None):
    """Fetch the search-result list page and return it parsed.

    Args:
        cookie_str: Cookie header value. When None (the default, preserving
            the original call signature) the module-level ``cookieStr``
            global set by the ``__main__`` block is used.

    Returns:
        BeautifulSoup: the parsed result page.
    """
    if cookie_str is None:
        # NOTE(review): falls back to the global assigned in __main__; the
        # original implementation depended on this global implicitly.
        cookie_str = cookieStr
    headers = {

        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Cache-Control": "no-cache",
        "Cookie": cookie_str,
        "Pragma": "no-cache",
        "priority": "u=0, i",
        "Referer": "https://kns.cnki.net/kns8s/defaultresult/index?crossids=YSTT4HG0%2CLSTPFY1C%2CJUP3MUPD%2CMPMFIG1A%2CWQ0UVIAA%2CBLZOG7CK%2CPWFIRAGL%2CEMRPGLPA%2CNLBO1Z6R%2CNN3FJMUV&korder=SU&kw=%E7%A4%BE%E4%BC%9A%E5%B7%A5%E4%BD%9C",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "'macOS'",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.60 Safari/537.36",
    }
    # timeout prevents an indefinite hang on a stalled connection.
    response = requests.get("https://webofscience.clarivate.cn/wos/alldb/summary/bc015c52-d1d8-4871-bbb0-86ff190f7ef0-01019d693b/author-ascending/1", headers=headers, timeout=30)
    # print(response.content)
    unicode_string = response.content.decode('utf-8')
    print(unicode_string)

    soup = BeautifulSoup(unicode_string, 'html.parser')

    return soup


if __name__ == "__main__":
    # NOTE(review): there is a second `if __name__ == "__main__":` block
    # further down this file; both run when the script is executed directly.
    # getCookie()

    # Explicit encoding avoids platform-default decoding of the cookie file.
    with open("cookieStr.txt", "r", encoding="utf-8") as file:
        cookieStr = file.read()
    print(cookieStr)
    getList()

if __name__ == "__main__":
    # Explicit encoding avoids platform-default decoding of the cookie file.
    with open("cookieStr.txt", "r", encoding="utf-8") as file:
        cookieStr = file.read()
    print(cookieStr)

    # Provide the keys getItemData reads ('title', 'infoData'); the original
    # call passed only 'url', which guaranteed a KeyError inside getItemData
    # and a second, uncaught KeyError in its error handler.
    getItemData({
        'url': 'https://webofscience.clarivate.cn/wos/alldb/full-record/WOS:00094327550000',
        'title': 'WOS:00094327550000',
        'infoData': {},
    }, cookieStr)