from calendar import c
from msilib.schema import File
from operator import le
from traceback import print_tb
from wsgiref.util import FileWrapper
from xml.etree.ElementTree import QName
from pandas import qcut
from regex import F, R
import requests
from lxml import etree
import re
import time
import random
import sys
import io
# Re-wrap stdout as UTF-8 so the Chinese journal names printed below do not
# crash on consoles whose default encoding (e.g. GBK/cp936 on Windows)
# cannot represent them.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')

class Cnki():
    """Scrape journal metadata from CNKI (navi.cnki.net) into a TSV file.

    Workflow:
      1. ``down`` fetches the category tree and maps a human-readable
         journal-category name to its internal CCL code,
      2. pages through the search results for that code,
      3. writes one tab-separated row per journal to ``cnki<timestamp>.csv``.
    """

    def __init__(self) -> None:
        # Preset category name; when empty, down() prompts the user.
        self.qkName = ''

    def down(self):
        """Query CNKI for a journal category and dump every result page.

        Writes a tab-separated file named ``cnki<timestamp>.csv``.

        Raises:
            ValueError: if the entered category name is not found in the
                category tree returned by the server.
        """
        # Explicit UTF-8: rows contain Chinese text and the platform default
        # encoding (e.g. GBK on Windows) may be unable to encode it.
        with open('cnki%s.csv' % time.time(), "w+", encoding='utf-8') as ff:
            header = {
                'Origin': 'https://navi.cnki.net',
                'Referer': 'https://navi.cnki.net/knavi/',
                'Sec-Fetch-Site': 'same-origin',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36 Edg/100.0.1185.50',
            }
            # Step 1: fetch the category tree (category name -> CCL code).
            keyUrl = 'https://navi.cnki.net/knavi/all/categories'
            data = {
                'productcode': 'ALL',
                'index': 'subject',
                'isClick': 1,
                'random': random.random()
            }
            html = requests.post(keyUrl, data=data, headers=header)
            selector = etree.HTML(html.text)
            # Compile once instead of on every <a> element.
            code_re = re.compile(r'.*"CCL","(\w+)",.*')
            key = {}
            for li in selector.xpath('//li'):
                keyName = li.xpath('span/a/@title')
                keyTitle = li.xpath('dl/dd/a/@title')
                # The CCL code is embedded in each entry's onclick handler.
                kv = [code_re.match(onclick).group(1)
                      for onclick in li.xpath('dl/dd/a/@onclick')]
                key[keyName[0]] = dict(zip(keyTitle, kv))
            qkName = self.qkName or input('请输入要搜索的期刊类型：')
            qkCode = None
            for v in key.values():
                if qkName in v:
                    qkCode = v[qkName]
                    break
            if qkCode is None:
                # Previously an unknown name fell through and crashed later
                # with UnboundLocalError; fail fast with a clear message.
                raise ValueError('未找到期刊类型: %s' % qkName)
            print(qkCode)
            # Step 2: page through the search results for that code.
            pageUrl = "https://navi.cnki.net/knavi/all/searchbaseinfo"
            data = {
                'searchStateJson': '{"StateID":"","Platfrom":"","QueryTime":"","Account":"knavi","ClientToken":"","Language":"","CNode":{"PCode":"ALL","SMode":"","OperateT":""},"QNode":{"SelectT":"","Select_Fields":"","S_DBCodes":"","QGroup":[{"Key":"Navi","Logic":1,"Items":[],"ChildItems":[{"Key":"all","Logic":1,"Items":[{"Key":"subject","Title":"","Logic":1,"Name":"CCL","Operate":"","Value":"%s?","ExtendType":0,"ExtendValue":"","Value2":""}],"ChildItems":[]}]}],"OrderBy":"","GroupBy":"","Additon":""}}' % qkCode,
                'displaymode': 1,
                'pageindex': 1,
                'pagecount': 10,
                'index': 'subject',
                'searchType': '来源名称',
                'clickName': qkName,
                'switchdata': 'leftnavi',
                'random': random.random()
            }
            html = requests.post(pageUrl, data=data, headers=header)
            pages = self.getPN(html)
            columns = ['名称', '曾用刊名', '主办单位', 'ISSN', 'CN',
                       '复合影响因子', '综合影响因子', '被引次数', '下载次数', '链接']
            ff.write('\t'.join(columns) + '\n')

            for pageindex in range(1, pages + 1):
                print('第%s页' % pageindex)
                # Random delay to avoid hammering the server / being blocked.
                time.sleep(random.randint(1, 100) / 10)
                data['pageindex'] = pageindex
                html = requests.post(pageUrl, data=data, headers=header)
                for row in self.getPT(html):
                    ff.write('\t'.join(row) + '\n')
                # The original called ff.writable() — a no-op query; flush so
                # partial results survive a crash or interrupt.
                ff.flush()
                print('%s::第%s页' % (qkName, pageindex), flush=True)

    def getPN(self, html):
        """Return the total number of result pages.

        Args:
            html: a ``requests.Response``-like object whose ``.text`` is the
                HTML of the first search-result page.
        """
        selector = etree.HTML(html.text)
        return int(selector.xpath('//*[@id="lblPageCount"]/text()')[0])

    def getPT(self, html):
        """Parse one result page into a list of journal rows.

        Args:
            html: a ``requests.Response``-like object with an HTML ``.text``.

        Returns:
            A list of rows ``[名称, 曾用刊名, 主办单位, ISSN, CN, 复合影响因子,
            综合影响因子, 被引次数, 下载次数, 链接]``; missing fields are ''.
        """
        selector = etree.HTML(html.text)
        title = selector.xpath(
            '//*[@id="searchResult"]/dl/dd/div[2]/h1/a/@title')
        href = ['https://navi.cnki.net/%s' % h for h in selector.xpath(
            '//*[@id="searchResult"]/dl/dd/div[2]/h1/a/@href')]
        # Output order of the per-journal metadata fields.
        fields = ['曾用刊名', '主办单位', 'ISSN', 'CN',
                  '复合影响因子', '综合影响因子', '被引次数', '下载次数']
        tt = []
        for ul in selector.xpath('//*[@id="searchResult"]/dl/dd/div[2]/ul'):
            t = {}
            for text in ul.xpath('li/span/text()'):
                # Split on the FIRST full-width colon only, so values that
                # themselves contain '：' stay intact, and entries without a
                # colon no longer raise IndexError.
                name, _, value = text.strip().partition('：')
                t[name] = value
            # dict.get replaces the original eight copy-pasted try/except
            # blocks (missing fields become '').
            tt.append([t.get(f, '') for f in fields])
        page = []
        for i in range(len(title)):
            row = [title[i]] + tt[i] + [href[i]]
            print(row, flush=True)
            page.append(row)
        return page


if __name__ == "__main__":
    # Run the scraper only when executed as a script; the original rebound
    # the name ``Cnki`` to an instance, shadowing the class and making the
    # module impossible to import without kicking off network requests.
    Cnki().down()
