import traceback
from sqlite3 import connect
from symbol import return_stmt

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
import time, keyboard
from urllib.parse import urlparse, parse_qs
import requests
import threading
import json
import datetime
from datetime import date

from cnki.date import get_previous_months_dates
from cnki.getCookie import getCookie
from config import webdriverPath, hostUrl, download_default_directory


class cnki:
    """Selenium-based scraper that visits the CNKI detail page of each
    result-list item, extracts its metadata, and persists it through the
    project's HTTP API (``hostUrl``).

    The driver is created lazily in :meth:`arr_exec`.
    """

    def __init__(self, list):
        # NOTE(review): the parameter shadows the builtin `list`; the name is
        # kept unchanged so existing callers (positional or keyword) still work.
        self.list = list

    def download_file(self, item):
        """Try to download the PDF for ``item`` from its open detail page.

        On success stores the resolved download URL in ``item['fullText']``.
        Always attempts to close the download tab and switch the driver back
        to the previous window before returning.
        """
        item_url = self.driver.current_url
        # Click the PDF download button; it should open a new tab.
        self.driver.find_element(By.CSS_SELECTOR, '#pdfDown').click()
        time.sleep(0.3)
        self.driver.switch_to.window(self.driver.window_handles[-1])
        # If the URL did not change, no download page was opened -> give up.
        time.sleep(0.3)

        try:
            if item_url == self.driver.current_url:
                print(item['documentName'], "下载失败，没有跳转下载也")
                return
        except Exception:
            # current_url can raise when the window has already gone away.
            print(item['documentName'], "下载失败，没有跳转下载也")
            return

        try:
            # A visible payment dialog (#zhifu) means the full text is
            # paywalled: close the tab and give up on this item.
            WebDriverWait(self.driver, 2).until(
                EC.visibility_of_element_located((By.CSS_SELECTOR,
                                                  '#zhifu'))
            )
            print(item['documentName'], "下载失败需要支付")
            time.sleep(0.3)
            self.driver.close()
            time.sleep(0.3)
            self.driver.switch_to.window(self.driver.window_handles[-1])
            return
        except Exception:
            # Timeout here means no payment dialog appeared: start download.
            print(item['documentName'], "开始下载")

        # Ask the page's own JavaScript (jQuery plus its BaseUrl / cacheID
        # globals) to confirm the download; done() resolves the async script
        # with the JSON response, or {code: 400} when the AJAX call fails.
        downloadUrl = self.driver.execute_async_script("""
    var done = arguments[arguments.length - 1]; // 异步回调函数
    var res
        $.ajax({
            type: "post",
            url: BaseUrl.api + "/downloadFee/confimDownLoad",
            async: true,
            contentType: "application/json",
            data: JSON.stringify({
            "cacheId": cacheID,
            "lang": "",
        }),
            dataType: "json",
            success: function (result) {
                done(result); // 当AJAX请求成功时，调用done函数
            },
            error: function (xmlHttpRequest, textStatus, errorThrown) {
                done({code : 400}); // 当AJAX请求失败时，调用done函数
            }
        })
        """)
        if downloadUrl['code'] == 200000:
            item['fullText'] = "https:" + downloadUrl['data']['downloadUrl']
            print("下载成功")
        self.driver.close()
        time.sleep(0.3)
        self.driver.switch_to.window(self.driver.window_handles[-1])

    def getItemData(self, item):
        """Scrape the detail page for ``item`` and save it via the API.

        Items the backend already knows (``/literature/getByAccessionNo``
        returning data != '0') are skipped.
        """
        res = requests.post(hostUrl + '/literature/getByAccessionNo',
                            json={'accessionNo': item['accessionNo']})
        if res.json().get('data') == '0':
            self.driver.get(item['url'])

            try:
                # Wait for the metadata rows to be rendered.
                WebDriverWait(self.driver, 5).until(
                    EC.visibility_of_element_located((By.CSS_SELECTOR, '.row'))
                )
            except Exception:
                # Page never rendered; skip this item silently (best effort).
                return
            item_content = self.driver.page_source
            try:
                soup = BeautifulSoup(item_content, 'html.parser')

                # Build {label: value} from each .rowtit label and the
                # element that follows it.
                d = {}
                for row in soup.select('.rowtit'):
                    key = row.text.strip().replace('：', '')
                    value = row.find_next_sibling().text.strip()
                    d[key] = value

                # Prefer 正文快照 (snapshot) and fall back to 摘要 (abstract).
                # BUG FIX: the original `d.pop('正文快照', d.pop('摘要', None))`
                # evaluated the default eagerly, so 摘要 was always popped
                # (and thus dropped from infoData) even when 正文快照 existed.
                if '正文快照' in d:
                    item['abstractInfo'] = d.pop('正文快照')
                else:
                    item['abstractInfo'] = d.pop('摘要', None)
                item['keywords'] = d.pop('关键词', None)
                item['doi'] = d.pop('DOI', None)
                # Whatever labels remain are kept as free-form metadata.
                item['infoData'] = d
            except Exception as e:
                print(e)
                traceback.print_exc()

            print(item['documentName'], "保存")
            time.sleep(0.3)
            requests.post(hostUrl + '/literature/save', json=item)
        else:
            print(item['documentName'], "该论文已存在")

    def arr_exec(self):
        """Start Chrome, log in to the proxy site, then scrape every item."""
        chrome_options = webdriver.ChromeOptions()
        # Skip images / plugins / extensions to speed up page loads.
        chrome_options.add_argument("--disable-images")
        chrome_options.add_argument("--disable-plugins")
        chrome_options.add_argument("--disable-extensions")
        # Start maximized.
        chrome_options.add_argument("--start-maximized")
        prefs = {
            "download.default_directory": download_default_directory,
        }
        chrome_options.add_experimental_option("prefs", prefs)

        self.driver = webdriver.Chrome(options=chrome_options, service=Service(webdriverPath))
        self.driver.get('http://www.scihuber.com/e/member/login/')

        # Log in.
        # NOTE(review): credentials are hard-coded; move them to config.
        self.driver.find_element(By.ID, 'username').send_keys('917612')
        self.driver.find_element(By.ID, 'password').send_keys('02690773584')
        self.driver.find_element(By.CSS_SELECTOR, '#maincolumn .lBtn').click()
        time.sleep(0.5)
        self.driver.get('http://www.scihuber.com/e/action/ShowInfo.php?classid=1&id=3029')

        # Scrape every item; keep going past individual failures.
        for d in self.list:
            try:
                self.getItemData(d)
            except Exception as e:
                # BUG FIX: the bare `print("出错")` hid the actual error.
                print("出错:", e)
                traceback.print_exc()

def getItem(list):
    """Scrape detail pages for every item dict in ``list``.

    Used as a thread target by :func:`getList`. Never raises: any failure is
    logged and swallowed so the worker thread dies quietly.

    NOTE(review): the parameter shadows the builtin `list`; kept for
    compatibility with existing callers.
    """
    try:
        a = cnki(list)
        a.arr_exec()
    except Exception as e:
        # BUG FIX: the original `print("出错" + e)` raised TypeError
        # (cannot concatenate str and Exception) inside the handler,
        # masking the real error. Print the exception properly instead.
        print("出错:", e)
        traceback.print_exc()


def getList(pageNum, kv, d1=None, d2=None):
    """Fetch one page of the CNKI result list and scrape each row's detail.

    Args:
        pageNum: 1-based result page number to request.
        kv: literature-source (journal) name interpolated into the query body.
        d1, d2: publication-date bounds. NOTE(review): the currently active
            request body ignores them (they belonged to the old date-filtered
            query); they now default to None so the existing 3-argument call
            in ``__main__`` no longer raises TypeError.

    Returns:
        int: total number of result pages reported by CNKI.
        BUG FIX: this used to be returned as a str, so the caller's
        ``pageSize == pageNum`` comparison (str == int) was always False.

    Raises:
        RuntimeError: when CNKI answers with a captcha/verification page.

    Side effects: reads the module-level global ``cookieStr``; spawns a
    worker thread that scrapes each row's detail page via :func:`getItem`,
    then sleeps 40s as a crude rate limit before returning.
    """
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        # BUG FIX: the hard-coded "Content-Length": "1124" was wrong for this
        # body and could corrupt the request; requests computes it itself.
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Cookie": cookieStr,
        "Host": "kns.cnki.net",
        "Origin": "https://kns.cnki.net",
        "Pragma": "no-cache",
        "Referer": "https://kns.cnki.net/kns8s/defaultresult/index?crossids=YSTT4HG0%2CLSTPFY1C%2CJUP3MUPD%2CMPMFIG1A%2CWQ0UVIAA%2CBLZOG7CK%2CPWFIRAGL%2CEMRPGLPA%2CNLBO1Z6R%2CNN3FJMUV&korder=SU&kw=%E7%A4%BE%E4%BC%9A%E5%B7%A5%E4%BD%9C",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "'macOS'",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.60 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest"
    }

    # Pre-encoded form body captured from the browser; {kv} and {pageNum}
    # are the only live parameters. (A large, unused `payload` dict and the
    # old date-filtered body were removed as dead code.)
    datastr = f'boolSearch=true&QueryJson=%7B%22Platform%22%3A%22%22%2C%22Resource%22%3A%22CROSSDB%22%2C%22Classid%22%3A%22WD0FTY92%22%2C%22Products%22%3A%22%22%2C%22QNode%22%3A%7B%22QGroup%22%3A%5B%7B%22Key%22%3A%22Subject%22%2C%22Title%22%3A%22%22%2C%22Logic%22%3A0%2C%22Items%22%3A%5B%5D%2C%22ChildItems%22%3A%5B%7B%22Key%22%3A%22input%5Bdata-tipid%3Dgradetxt-1%5D%22%2C%22Title%22%3A%22%E6%96%87%E7%8C%AE%E6%9D%A5%E6%BA%90%22%2C%22Logic%22%3A0%2C%22Items%22%3A%5B%7B%22Key%22%3A%22input%5Bdata-tipid%3Dgradetxt-1%5D%22%2C%22Title%22%3A%22%E6%96%87%E7%8C%AE%E6%9D%A5%E6%BA%90%22%2C%22Logic%22%3A0%2C%22Field%22%3A%22LY%22%2C%22Operator%22%3A%22DEFAULT%22%2C%22Value%22%3A%22{kv}%22%2C%22Value2%22%3A%22%22%7D%5D%2C%22ChildItems%22%3A%5B%5D%7D%2C%7B%22Key%22%3A%22input%5Bdata-tipid%3Dgradetxt-3%5D%22%2C%22Title%22%3A%22%E6%96%87%E7%8C%AE%E6%9D%A5%E6%BA%90%22%2C%22Logic%22%3A0%2C%22Items%22%3A%5B%7B%22Key%22%3A%22input%5Bdata-tipid%3Dgradetxt-3%5D%22%2C%22Title%22%3A%22%E6%96%87%E7%8C%AE%E6%9D%A5%E6%BA%90%22%2C%22Logic%22%3A0%2C%22Field%22%3A%22LY%22%2C%22Operator%22%3A%22DEFAULT%22%2C%22Value%22%3A%22{kv}%22%2C%22Value2%22%3A%22%22%7D%5D%2C%22ChildItems%22%3A%5B%5D%7D%5D%7D%2C%7B%22Key%22%3A%22ControlGroup%22%2C%22Title%22%3A%22%22%2C%22Logic%22%3A0%2C%22Items%22%3A%5B%5D%2C%22ChildItems%22%3A%5B%5D%7D%5D%7D%2C%22ExScope%22%3A%220%22%2C%22SearchType%22%3A1%2C%22Rlang%22%3A%22CHINESE%22%2C%22KuaKuCode%22%3A%22YSTT4HG0%2CLSTPFY1C%2CJUP3MUPD%2CMPMFIG1A%2CWQ0UVIAA%2CBLZOG7CK%2CPWFIRAGL%2CEMRPGLPA%2CNLBO1Z6R%2CNN3FJMUV%22%2C%22SearchFrom%22%3A1%7D&pageNum={pageNum}&pageSize=50&sortField=&sortType=&dstyle=listmode&boolSortSearch=false&sentenceSearch=false&productStr=YSTT4HG0%2CLSTPFY1C%2CRMJLXHZ3%2CJQIRZIYA%2CJUP3MUPD%2C1UR4K4HZ%2CBPBAFJ5S%2CR79MZMCB%2CMPMFIG1A%2CWQ0UVIAA%2CNB3BWEHK%2CXVLO76FD%2CHR1YT1Z9%2CBLZOG7CK%2CPWFIRAGL%2CEMRPGLPA%2CJ708GVCE%2CML4DRIDX%2CNLBO1Z6R%2CNN3FJMUV%2C&aside=%EF%BC%88%E6%96%87%E7%8C%AE%E6%9D%A5%E6%BA%90%EF%BC%9A{kv}(%E7%B2%BE%E7%A1%AE)%EF%BC%89AND%EF%BC%88%E6%96%87%E7%8C%AE%E6%9D%A5%E6%BA%90%EF%BC%9A{kv}(%E7%B2%BE%E7%A1%AE)%EF%BC%89&searchFrom=%E8%B5%84%E6%BA%90%E8%8C%83%E5%9B%B4%EF%BC%9A%E6%80%BB%E5%BA%93%3B++%E6%97%B6%E9%97%B4%E8%8C%83%E5%9B%B4%EF%BC%9A%E6%9B%B4%E6%96%B0%E6%97%B6%E9%97%B4%EF%BC%9A%E4%B8%8D%E9%99%90%3B++&CurPage=1'

    response = requests.post("https://kns.cnki.net/kns8s/brief/grid", headers=headers,
                             data=datastr)
    unicode_string = response.content.decode('utf-8')

    # Fail loudly on CNKI's captcha / verification interstitials so the
    # caller can refresh the cookie and retry.
    # (Was `assert`, which is stripped under `python -O`.)
    if unicode_string.rfind('请输入验证码') != -1 or unicode_string.rfind('知网节超时验证') != -1:
        raise RuntimeError('知网节超时验证')

    soup = BeautifulSoup(unicode_string, 'html.parser')

    # countPageMark looks like "cur/total"; take the total page count.
    class_citation = soup.find(class_="countPageMark")
    print(class_citation.text.strip())
    pageSize = int(class_citation.text.strip().split("/")[1])

    rows = soup.select("#gridTable  table > tbody > tr")

    datas = []
    for tr in rows:
        try:
            name = tr.select('td.name a')[0]
            d = {
                'accessionNo': tr.select('td.operat > a.icon-collect')[0]['data-filename'],
                'type': 10,
                'pageNum': pageNum,
                'sourceWebsite': '知网',
                'language': 'zh',
                'datasource': '社会工作',
                'documentName': name.text.strip(),
                'url': name['href'],
                'author': tr.select('td.author')[0].text.strip(),
                'source': tr.select('td.source')[0].text.strip(),
                'database': tr.select('td.data')[0].text.strip(),
                'date': tr.select('td.date')[0].text.strip(),
                'infoData': {
                    'download_url': tr.select('td.operat a')[0]['href'],
                }
            }
            datas.append(d)
        except Exception as e:
            # Malformed rows (ads, separators) are skipped.
            print(e)

    # Scrape detail pages on a worker thread; the fixed 40s sleep throttles
    # list requests and gives the worker time to finish.
    thread = threading.Thread(target=getItem, args=(datas,))
    thread.start()
    time.sleep(40)
    return pageSize


if __name__ == "__main__":
    # getCookie()

    # cookieStr is read as a module-level global by getList().
    with open("cookieStr.txt", "r") as file:
        cookieStr = file.read()
    print(cookieStr)

    # Lower bound: stop once the sliding month window reaches this date.
    d = date(1958, 6, 30)
    # Crawl backwards one month at a time starting from start_date.
    start_date = '2022-12-01'
    # Resume point: first month starts at this page, later months at page 1.
    startPageNum = 7
    retries = 1
    failed_pages = []
    while d < datetime.datetime.strptime(start_date, '%Y-%m-%d').date():
        dates = get_previous_months_dates(start_date, 1)
        start_date = dates[1]
        pageNum = startPageNum
        if startPageNum != 1:
            startPageNum = 1
        # BUG FIX: reset the retry counter for every month; previously it
        # carried over, so after one 3-strike month every later month
        # aborted on its first error.
        retries = 1
        while pageNum <= 300:  # CNKI serves at most 300 pages
            pageSize = 0
            try:
                # NOTE(review): getList's 2nd parameter is `kv` (a journal
                # name) but a date string is passed here — confirm the
                # intended query. TODO
                pageSize = getList(pageNum, dates[1], dates[0])
                print(pageNum, dates[1], dates[0])
            except Exception as e:
                print(e)
                # Refresh the cookie (best effort) and retry the same page.
                try:
                    cookieStr = getCookie()
                except Exception as cookie_err:
                    print(cookie_err)
                # Give up on this month after three consecutive failures.
                if retries > 3:
                    # BUG FIX: was `append({...})`, which built an unordered,
                    # de-duplicating set; record an ordered tuple instead.
                    failed_pages.append((pageNum, dates[1], dates[0]))
                    break
                retries += 1
                continue

            retries = 1
            print(pageSize)
            # BUG FIX: getList historically returned the page count as a
            # str, so `pageSize == pageNum` (str == int) was always False
            # and the loop always ran to page 300. Compare as ints.
            if int(pageSize) == pageNum:
                break
            pageNum += 1
