# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
from pymongo import MongoClient
import re
import time
import random

from utils.HttpUtils import HttpUtils

"""
中国知网-专业检索
https://kns.cnki.net/kns/brief/result.aspx?dbprefix=SCDB&crossDbcodes=CJFQ,CDFD,CMFD,CPFD,IPFD,CCND,CCJD
"""
class CNKI():
    """Scraper for the CNKI professional-search result list (kns.cnki.net).

    Workflow per page: fetch one result-list page, parse each table row for
    bibliographic fields, follow the entry's detail page for abstract /
    keywords / DOI, then persist the record to MongoDB via ``insertItem``.
    """

    def __init__(self):
        # CookieJar instance to hold session cookies for the scrape session.
        self.cookie = cookiejar.CookieJar()
        # NOTE(review): the original constructed a fake_useragent.UserAgent
        # here but never used it (the User-Agent header below is hard-coded);
        # the call could also fail when the UA cache server is unreachable,
        # so it was removed.
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "DNT": "1",
            "Host": "kns.cnki.net",
            "Referer": "https://kns.cnki.net/kns/brief/brief.aspx?ctl=fd67c1eb-b5de-4c91-a2a9-f06806e2e14f&dest=%E5%88%86%E7%BB%84%EF%BC%9A%E4%B8%BB%E9%A2%98%20%E6%98%AF%20%E6%95%B0%E6%8D%AE%E5%BA%93&action=5&dbPrefix=SCDB&PageName=ASP.brief_result_aspx&Param=NVSM%e5%85%b3%e9%94%ae%e8%af%8d+%3d+%27%e6%95%b0%e6%8d%ae%e5%ba%93%27&isinEn=1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36",
            "Sec-Fetch-Mode": "nested-navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1"
        }

    @staticmethod
    def _first_match(pattern, text):
        """Return the first regex capture of ``pattern`` in ``text``, or ""
        when there is no match.

        The original code indexed ``re.findall(...)[0]`` directly, which
        raises IndexError on any URL missing the expected query parameter;
        an empty string degrades gracefully instead.
        """
        found = re.findall(pattern, text)
        return found[0] if found else ""

    def get_contents(self, pageNo):
        """Scrape one page of search results and store each entry in MongoDB.

        :param pageNo: 1-based page number of the search result list.
        """
        postData = {
            "curpage": pageNo,        # page number
            "RecordsPerPage": 50,     # rows per page
            "QueryID": 33,            # search ID; time-limited, changes per session
            "ID": "",
            "turnpage": 1,
            "tpagemode": "L",
            "dbPrefix": "WWDB",
            "Fields": "",
            "DisplayMode": "listmode",
            "PageName": "ASP.brief_result_aspx",
            "isinEn": 1
        }
        url = "https://kns.cnki.net/kns/brief/brief.aspx"
        listing = HttpUtils.do_request("GET", url, self.headers, postData)
        list_root = etree.HTML(listing.text)
        rows = list_root.xpath('//table[@class="GridTableContent"]/tr')
        # The first <tr> is the table header row; slice it off instead of the
        # original first-iteration flag (which also printed an empty dict).
        for row in rows[1:]:
            dict_data = dict()
            dict_data['title'] = "".join(row.xpath('./td[2]//text()')).replace("\r\n", "").strip()  # title
            dict_data['zz'] = "".join(row.xpath('./td[3]//text()')).replace("\r\n", "").strip()  # authors
            dict_data['ly'] = "".join(row.xpath('./td[4]//text()')).replace("\r\n", "").strip()  # source
            dict_data['fbrq'] = (row.xpath('./td[5]//text()')[0]).strip()  # publication date
            dict_data['sjk'] = (row.xpath('./td[6]//text()')[0]).strip()  # database
            dict_data['url'] = "https://kns.cnki.net" + row.xpath('./td[2]/a/@href')[0]  # detail-page link
            dict_data['down_count'] = ("".join(row.xpath('./td[8]//text()')).replace("\r\n", "")).strip()  # download count
            # Withdrawn titles have no download link.
            if len(row.xpath('./td[8]/a/@href')) > 0:
                dict_data["down_url"] = "http://kns.cnki.net/kns/" + row.xpath('./td[8]/a/@href')[0][2:]  # download URL
            # English-edition URLs use lower-case query keys; extract the parts
            # needed to rebuild the canonical detail-page URL.
            DbCode = self._first_match(r"dbcode=(.*?)&", dict_data["url"])
            DbName = self._first_match(r"dbname=(.*?)&", dict_data["url"])
            FileName = self._first_match(r"filename=(.*?)$", dict_data["url"])
            url_child = f"https://kns.cnki.net/KCMS/detail/detail.aspx?dbcode={DbCode}&dbname={DbName}" \
                        f"&filename={FileName}"
            # Fetch the detail page for abstract, keywords and DOI.
            detail = HttpUtils.do_request("GET", url_child, self.headers, postData)
            # Throttle between detail requests.  NOTE(review): the original
            # used random.randint(1, 1), which always returns 1 — a constant
            # 1-second delay is kept; widen the range if real jitter is wanted.
            time.sleep(1)
            detail_root = etree.HTML(detail.text)
            for d in detail_root.xpath('//div[@class="wxBaseinfo wxBaseinfoFull"]/p'):
                texts = d.xpath('.//text()')
                if not texts:
                    # Empty <p> — the original would IndexError here.
                    continue
                # The markers below apply to English-language records.
                if "ABSTRACT" in texts[0]:
                    dict_data['desc'] = "".join(texts).replace("更多还原", "").strip()  # abstract
                elif "KEYWORDS" in texts[0]:
                    dict_data['keyword'] = ""
                    for t in texts:
                        dict_data['keyword'] += t.replace("\r\n", "").replace("更多还原", "").strip()
                elif "DOI" in texts[0]:
                    dict_data['catalog_ZCDOI'] = "".join(texts) \
                        .replace("\r\n", "").replace(" ", "")  # DOI
            self.insertItem("数据库", dict_data)
            print(str(dict_data))

    def insertItem(self, tableName, data):
        """Insert one record into the MongoDB collection ``tableName``.

        NOTE(review): relies on the module-level ``db`` handle created in the
        ``__main__`` block — this method only works when run as a script.
        """
        my_set = db[tableName]
        my_set.insert_one(data)


if __name__ == '__main__':
    # Open the MongoDB connection; the module-level ``db`` name is what
    # CNKI.insertItem writes through.
    conn = MongoClient('127.0.0.1', 27017)
    db = conn["Test"]

    cnki = CNKI()
    # Scrape result pages 7 through 10 inclusive.
    for page in range(7, 11):
        print(f"=======开始第【{page}】页=========")
        cnki.get_contents(page)