# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
from pymongo import MongoClient
import re
import time
import random
import urllib.parse

# Globally silence urllib3's HTTPS certificate warnings (InsecureRequestWarning),
# presumably because requests are issued with verify=False — TODO confirm.
requests.packages.urllib3.disable_warnings()  # ignore HTTPS security warnings

"""
中国知网-专业检索
https://kns.cnki.net/kns/brief/result.aspx?dbprefix=SCDB&crossDbcodes=CJFQ,CDFD,CMFD,CPFD,IPFD,CCND,CCJD
搜索关键词依据Referer
"""
class CNKI:
    """CNKI (kns.cnki.net) professional-search scraper.

    One call to :meth:`get_contents` fetches a single result-list page,
    follows every row to its detail page (abstract / keywords / fund / DOI /
    classification number) and stores each record in MongoDB through the
    module-level ``db`` handle bound in ``__main__``.
    """

    # Result-list endpoint; the actual search keyword travels in the Referer.
    SEARCH_URL = "https://kns.cnki.net/kns/brief/brief.aspx"

    def __init__(self):
        # CookieJar instance to hold session cookies.  NOTE(review): nothing
        # wires it into the requests below — the hard-coded COOKIE header is
        # what the server actually sees; confirm whether a requests.Session
        # with this jar should replace it.
        self.cookie = cookiejar.CookieJar()
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "DNT": "1",
            "Host": "kns.cnki.net",
            "Referer": "https://kns.cnki.net/kns/brief/brief.aspx?ctl=e76a03da-f506-49fc-9993-55adf9b01895&dest=%E5%88%86%E7%BB%84%EF%BC%9A%E4%B8%BB%E9%A2%98%20%E6%98%AF%20%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B&action=5&dbPrefix=SCDB&PageName=ASP.brief_result_aspx&Param=NVSM%e5%85%b3%e9%94%ae%e8%af%8d+%3d+%27%e8%bd%af%e4%bb%b6%e5%b7%a5%e7%a8%8b%27&isinEn=1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36",
            "Sec-Fetch-Mode": "nested-navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "COOKIE": 'ASP.NET_SessionId=n3adbrukue2qnqpfxxxhmsbi; Ecp_ClientId=6200814164700629688; SID_kns=123106; SID_klogin=125142; Ecp_IpLoginFail=200814119.116.73.96; SID_kns_new=kns123123; KNS_SortType=; RsPerPage=20; SID_krsnew=125132; cnkiUserKey=c20c8cfd-630a-a6f5-f913-281c6e0326db; _pk_ses=*'
        }

    @staticmethod
    def _first_match(pattern, text):
        """Return the first capture group of *pattern* in *text*, or "" when absent.

        Replaces the old ``re.findall(...)[0]`` idiom, which raised IndexError
        whenever a query parameter (e.g. URLID) was missing from a result link.
        """
        match = re.search(pattern, text)
        return match.group(1) if match else ""

    @staticmethod
    def _build_referer(keyword, ts=None):
        """Build the Referer URL that carries the search keyword.

        Only the non-ASCII pieces are percent-encoded.  The old code quoted
        the *entire* URL, which also encoded ``://``, ``?``, ``=`` and ``&``
        and produced an invalid Referer header.

        :param keyword: search keyword (may contain CJK characters)
        :param ts: millisecond timestamp; defaults to "now" (kept as a
                   parameter so the builder is deterministic in tests)
        """
        if ts is None:
            ts = int(time.time() * 1000)
        catalog = urllib.parse.quote("中国学术文献网络出版总库")
        return (
            "https://kns.cnki.net/kns/brief/brief.aspx"
            "?pagename=ASP.brief_default_result_aspx&isinEn=1&dbPrefix=SCDB"
            f"&dbCatalog={catalog}&ConfigFile=SCDBINDEX.xml&research=off"
            f"&t={ts}&keyValue={urllib.parse.quote(keyword)}&S=1&sorttype="
        )

    def get_contents(self, pageNo, keyword):
        """Fetch result page *pageNo* for *keyword* and persist every row.

        :param pageNo: 1-based page number of the result list
        :param keyword: search keyword; also used as the MongoDB collection
                        name (the old code hard-coded "软件工程" regardless of
                        what was actually searched)
        """
        postData = {
            "curpage": pageNo,       # page number
            "RecordsPerPage": 50,    # rows per page
            "QueryID": 6,            # search id — time-limited, changes per session
            "ID": "",
            "turnpage": 1,
            "tpagemode": "L",
            "dbPrefix": "SCDB",
            "Fields": "",
            "DisplayMode": "listmode",
            "PageName": "ASP.brief_result_aspx",
            "isinEn": 1
        }
        # The search keyword is carried via the Referer header.
        self.headers['Referer'] = self._build_referer(keyword)
        # NOTE(review): the original called an undefined ``HttpUtils.do_request``
        # (NameError at runtime); a plain GET with the same query parameters
        # replaces it.  verify=False matches the module-level disable_warnings().
        resp = requests.get(self.SEARCH_URL, headers=self.headers,
                            params=postData, verify=False)
        root = etree.HTML(resp.text)
        rows = root.xpath('//table[@class="GridTableContent"]/tr')
        # rows[0] is the table header: slicing it off replaces the old boolean
        # flag, which also printed an empty dict for the header row.
        for row in rows[1:]:
            dict_data = self._parse_row(row)
            print(str(dict_data))
            self.insertItem(keyword, dict_data)

    def _parse_row(self, row):
        """Extract the list-page fields from one result ``<tr>``, enrich the
        record with detail-page info, and return the record dict."""
        def squash(parts):
            # Join xpath text nodes and drop CRLFs plus spaces.
            return "".join(parts).replace("\r\n", "").replace(" ", "")

        dict_data = {}
        dict_data['title'] = squash(row.xpath('./td[2]//text()'))  # title
        dict_data['zz'] = squash(row.xpath('./td[3]//text()'))     # authors
        dict_data['ly'] = squash(row.xpath('./td[4]//text()'))     # source
        fbrq = row.xpath('./td[5]//text()')
        dict_data['fbrq'] = fbrq[0].strip() if fbrq else ""        # publish date
        sjk = row.xpath('./td[6]//text()')
        dict_data['sjk'] = sjk[0].strip() if sjk else ""           # database
        title_href = row.xpath('./td[2]/a/@href')
        dict_data['url'] = ("https://kns.cnki.net" + title_href[0]) if title_href else ""
        dict_data['down_count'] = "".join(row.xpath('./td[8]//text()')).replace("\r\n", "").strip()
        # Withdrawn articles have no download link.
        down_href = row.xpath('./td[8]/a/@href')
        if down_href:
            dict_data["down_url"] = "http://kns.cnki.net/kns/" + down_href[0][2:]
        # Detail URL is built from the filename-style parameters; [^&]* also
        # matches a parameter that happens to be last in the query string.
        db_code = self._first_match(r"DbCode=([^&]*)", dict_data['url'])
        db_name = self._first_match(r"DbName=([^&]*)", dict_data['url'])
        file_name = self._first_match(r"FileName=([^&]*)", dict_data['url'])
        if file_name:
            detail_url = (f"https://kns.cnki.net/KCMS/detail/detail.aspx?"
                          f"dbcode={db_code}&dbname={db_name}&filename={file_name}")
            self._fill_detail(detail_url, dict_data)
        return dict_data

    def _fill_detail(self, url, dict_data):
        """Fetch the article detail page and add abstract / keywords / fund /
        DOI / classification number to *dict_data* in place."""
        resp = requests.get(url, headers=self.headers, verify=False)
        time.sleep(random.randint(1, 2))  # polite delay between detail fetches
        root = etree.HTML(resp.text)
        for p in root.xpath('//div[@class="wxBaseinfo"]/p'):
            texts = p.xpath('.//text()')
            if not texts:
                continue
            label, joined = texts[0], "".join(texts)
            # "摘要" = abstract; "正文快照" (snapshot — newspapers have no
            # abstract) and "ABSTRACT" (English articles) fill the same field.
            if "摘要" in label or "正文快照" in label or "ABSTRACT" in label:
                dict_data['desc'] = joined.replace("更多还原", "")
            elif "关键词" in label:  # keywords
                dict_data['keyword'] = joined.replace("\r\n", "").replace(" ", "")
            elif "KEYWORDS" in label:  # English-article keywords
                dict_data['keyword'] = joined.replace("更多还原", "")
            elif "基金" in label:  # fund
                dict_data['catalog_FUND'] = joined.replace("\r\n", "").replace(" ", "")
            elif "DOI" in label:
                dict_data['catalog_ZCDOI'] = joined.replace("\r\n", "").replace(" ", "")
            elif "分类号" in label:  # classification number
                dict_data['catalog_ZTCLS'] = joined

    def insertItem(self, tableName, data):
        """Insert one record into collection *tableName* of the module-level
        ``db`` handle (a pymongo database bound in ``__main__``)."""
        db[tableName].insert_one(data)


if __name__ == '__main__':
    # `conn` and `db` deliberately stay module-level globals: CNKI.insertItem
    # resolves the global `db` handle when it writes records.
    conn = MongoClient('127.0.0.1', 27017)
    db = conn["Test"]
    cnki = CNKI()
    first_page, last_page = 1, 10
    for page in range(first_page, last_page + 1):
        print(f"=======开始第【{page}】页=========")
        cnki.get_contents(page, "大数据")
