# CNKI search-result scraper: fetches result pages, parses article metadata,
# and posts it to a local journal API. Cookie string is read from cookieStr.txt.
import requests as requests

from bs4 import BeautifulSoup
from bs4 import UnicodeDammit
import json
import traceback
# import chardet

from cnki.getCookie import getCookie


# print(d)
def downloadItemPdf(item, cookieStr):
    """Fetch the page behind ``item['url']`` with browser-like CNKI headers.

    Parameters:
        item: dict with at least ``'url'`` and ``'title'`` keys.
        cookieStr: raw Cookie header value captured from a browser session.

    On any failure the raw response body is dumped to
    ``item/<title>_<error>.txt`` for offline inspection; the function never
    raises (errors are printed and swallowed).
    """
    headers1 = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        # An empty UserDownLoads cookie is appended on purpose — presumably
        # to reset the site's download counter; TODO confirm.
        "Cookie": cookieStr + ' UserDownLoads=;',
        "Host": "kns.cnki.net",
        "Origin": "https://kns.cnki.net",
        "Pragma": "no-cache",
        "Referer": "https://kns.cnki.net/kns8s/defaultresult/index?crossids=YSTT4HG0%2CLSTPFY1C%2CJUP3MUPD%2CMPMFIG1A%2CWQ0UVIAA%2CBLZOG7CK%2CPWFIRAGL%2CEMRPGLPA%2CNLBO1Z6R%2CNN3FJMUV&korder=SU&kw=%E7%A4%BE%E4%BC%9A%E5%B7%A5%E4%BD%9C",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "'macOS'",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
    }
    # response.content is bytes, so the fallback value must be bytes as well.
    # The original initialised it to '' (str) and then wrote bytes into a
    # text-mode file, which raised TypeError inside the error handler.
    item_content = b''
    try:
        response = requests.get(item['url'], headers=headers1)
        item_content = response.content
        print(item_content)
    except Exception as e:
        print(e)
        traceback.print_exc()
        # Dump the raw body in binary mode (it is bytes and may be
        # gzip/brotli-compressed) so the failure can be analysed offline.
        # NOTE(review): str(e) may contain characters illegal in filenames.
        with open("item/" + item['title'] + "_" + str(e) + ".txt", "wb") as file:
            file.write(item_content)

    print(item['title'], "保存")


# print(d)
def getItemData(item, cookieStr):
    """Scrape one article's detail page and save the result to the local API.

    Skips items whose accessionNo is already known to the backend (the
    getByAccessionNo endpoint returns data != '0' for known items).

    Parameters:
        item: dict with 'accessionNo', 'url', 'title' and 'infoData' keys;
              mutated in place with 'abstractInfo' and 'keywords'.
        cookieStr: raw Cookie header value for kns.cnki.net.
    """
    res = requests.post('http://localhost:1728/qikan/journal/getByAccessionNo',
                        json={'accessionNo': item['accessionNo']})
    if res.json().get('data') == '0':
        headers1 = {

            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            # Accept-Encoding deliberately omitted so the server replies
            # uncompressed and UnicodeDammit can sniff the raw HTML.
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Cookie": cookieStr,
            "Host": "kns.cnki.net",
            "Origin": "https://kns.cnki.net",
            "Pragma": "no-cache",
            "Referer": "https://kns.cnki.net/kns8s/defaultresult/index?crossids=YSTT4HG0%2CLSTPFY1C%2CJUP3MUPD%2CMPMFIG1A%2CWQ0UVIAA%2CBLZOG7CK%2CPWFIRAGL%2CEMRPGLPA%2CNLBO1Z6R%2CNN3FJMUV&korder=SU&kw=%E7%A4%BE%E4%BC%9A%E5%B7%A5%E4%BD%9C",
            "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "'macOS'",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        }
        item_content = ''
        try:
            response = requests.get(item['url'], headers=headers1)
            # UnicodeDammit sniffs the encoding and yields a str.
            item_content = UnicodeDammit(response.content).unicode_markup
            # Explicit raise instead of assert: asserts vanish under `python -O`.
            if item_content.rfind('知网节超时验证') != -1:
                raise ValueError('知网节超时验证')

            soup = BeautifulSoup(item_content, 'html.parser')

            # Detail pages render metadata as "<key>：<value>" rows
            # (full-width colon separator).
            d = {}
            for row in soup.select('.row'):
                t = row.text.strip()
                index = t.find('：')
                if index != -1:
                    d[t[:index]] = t[index + 1:]

            # Remove BOTH candidate keys so neither leaks into infoData,
            # preferring 正文快照 over 摘要 for the abstract. (The original's
            # nested d.pop(..., d.pop(...)) evaluated both pops eagerly
            # anyway — this makes that behavior explicit.)
            snapshot = d.pop('正文快照', None)
            abstract = d.pop('摘要', None)
            item['abstractInfo'] = snapshot if snapshot is not None else abstract
            item['keywords'] = d.pop('关键词', None)
            item['infoData'].update(d)

        except Exception as e:
            print(e)
            traceback.print_exc()
            # "item/" directory prefix — the original wrote "item" + title
            # into the CWD, inconsistent with downloadItemPdf. Explicit
            # UTF-8 so Chinese page content round-trips on any platform.
            with open("item/" + item['title'] + "_" + str(e) + ".txt", "w",
                      encoding="utf-8") as file:
                file.write(item_content)

        print(item['title'], "保存")
        requests.post('http://localhost:1728/qikan/journal/save', json=item)
    else:
        print(item['title'], "该论文已存在")


def getList(pageNum):
    """Fetch one page of CNKI search results for the "社会工作" subject query.

    Parameters:
        pageNum: 1-based page index to request (50 items per page).

    Returns:
        int: total number of pages reported by the result grid's pagination
        marker (so the caller's ``pageSize == pageNum`` stop condition can
        actually fire — the original returned a str, and str == int is
        always False).

    Raises:
        ValueError: when the response is a captcha / timeout-verification
        interstitial instead of a result list.

    NOTE(review): reads the module-level ``cookieStr`` loaded in __main__.
    """
    headers = {

        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Length": "1124",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Cookie": cookieStr,
        "Host": "kns.cnki.net",
        "Origin": "https://kns.cnki.net",
        "Pragma": "no-cache",
        "Referer": "https://kns.cnki.net/kns8s/defaultresult/index?crossids=YSTT4HG0%2CLSTPFY1C%2CJUP3MUPD%2CMPMFIG1A%2CWQ0UVIAA%2CBLZOG7CK%2CPWFIRAGL%2CEMRPGLPA%2CNLBO1Z6R%2CNN3FJMUV&korder=SU&kw=%E7%A4%BE%E4%BC%9A%E5%B7%A5%E4%BD%9C",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "'macOS'",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.60 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest"
    }

    payload = {
        'boolSearch': 'true'
        ,
        'QueryJson': '{"Platform":"","Resource":"CROSSDB","Classid":"WD0FTY92","Products":"","QNode":{"QGroup":[{"Key":"Subject","Title":"","Logic":0,"Items":[{"Field":"SU","Value":"社会工作","Operator":"TOPRANK","Logic":0,"Title":"主题"}],"ChildItems":[]}]},"ExScope":1,"SearchType":2,"Rlang":"CHINESE","KuaKuCode":"YSTT4HG0,LSTPFY1C,JUP3MUPD,MPMFIG1A,WQ0UVIAA,BLZOG7CK,PWFIRAGL,EMRPGLPA,NLBO1Z6R,NN3FJMUV","SearchFrom":4}'
        , 'pageNum': pageNum
        , 'pageSize': 50
        , 'sortField': 'PT'
        , 'sortType': 'desc'
        , 'dstyle': 'listmode'
        ,
        'boolSortSearch': 'false'
        ,
        'productStr': 'YSTT4HG0,LSTPFY1C,RMJLXHZ3,JQIRZIYA,JUP3MUPD,1UR4K4HZ,BPBAFJ5S,R79MZMCB,MPMFIG1A,WQ0UVIAA,NB3BWEHK,XVLO76FD,HR1YT1Z9,BLZOG7CK,PWFIRAGL,EMRPGLPA,J708GVCE,ML4DRIDX,NLBO1Z6R,NN3FJMUV,'
        , 'aside': '主题：社会工作'
        , 'searchFrom': '资源范围：总库'
    }
    response = requests.post("https://kns.cnki.net/kns8s/brief/grid", headers=headers, data=payload)
    unicode_string = response.content.decode('utf-8')

    # Fail fast on anti-crawler interstitials instead of parsing garbage.
    # Explicit raise instead of assert: asserts are stripped under `python -O`.
    if unicode_string.rfind('请输入验证码') != -1:
        raise ValueError('知网节超时验证')
    if unicode_string.rfind('知网节超时验证') != -1:
        raise ValueError('知网节超时验证')

    soup = BeautifulSoup(unicode_string, 'html.parser')

    # Pagination marker renders as "<current>/<total>" — take the total.
    class_citation = soup.find(class_="countPageMark")
    print(class_citation.text.strip())
    pageSize = int(class_citation.text.strip().split("/")[1])

    # `rows`, not `list` — the original shadowed the builtin.
    rows = soup.select("#gridTable  table > tbody > tr")

    datas = []
    for tr in rows:
        name = tr.select('td.name > a')[0]
        d = {
            'accessionNo': tr.select('td.operat > a.icon-collect')[0]['data-filename'],
            'type': 10,
            'pageNum': pageNum,
            'sourceWebsite': '知网',
            'language': 'zh',
            'datasource': '社会工作',
            'documentName': name.text.strip(),
            'url': name['href'],
            'author': tr.select('td.author')[0].text.strip(),
            'source': tr.select('td.source')[0].text.strip(),
            # select() returns a ResultSet; the original called .text on the
            # set itself, which raises AttributeError — index the first hit.
            'database': tr.select('td.data')[0].text.strip(),
            'date': tr.select('td.date')[0].text.strip(),
            'infoData': {
                'download_url': tr.select('td.operat > a')[0]['href'],
            }
        }
        # NOTE(review): datas is collected but never returned or persisted;
        # the per-item getItemData call was commented out upstream.
        datas.append(d)

    return pageSize


if __name__ == "__main__":
    # getCookie()  # uncomment to refresh the session cookie first

    # Session cookie captured beforehand (see cnki.getCookie); read it once
    # and reuse it for every request in this run. Explicit UTF-8 so the
    # read does not depend on the platform's default encoding.
    with open("cookieStr.txt", "r", encoding="utf-8") as file:
        cookieStr = file.read()
    print(cookieStr)

    pageNum = 1
    try:
        # Hard cap of 300 pages as a safety net against a runaway loop.
        while pageNum <= 300:
            pageSize = getList(pageNum)
            print(pageSize)
            # int() guard: getList historically returned the page count as a
            # str, and `str == int` is always False, so the loop could never
            # stop early. int() is harmless if getList already returns int.
            if int(pageSize) == pageNum:
                break
            pageNum += 1
    except Exception as e:
        print(e)
        traceback.print_exc()
    print("结束当前：", pageNum)