import traceback
from sqlite3 import connect

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
import time, keyboard
from urllib.parse import urlparse, parse_qs
import requests
import threading
import json

from config import webdriverPath, hostUrl, download_default_directory


class cnki:
    """Crawler for CNKI articles reached through the scihuber.com proxy.

    Drives a Chrome instance through the search-result pages, scrapes
    per-article metadata, fetches the full-text download URL when the
    article is free, and posts each record to the backend configured via
    ``config.hostUrl``.  Progress is reported back to the caller by
    mutating the task dict passed to ``__init__`` in place.
    """

    def __init__(self, item):
        # Task dict shared with the caller: {'skipNum': ..., 'size': ...}.
        self.item = item
        # Chrome driver is created lazily in arr_exec().
        # NOTE: the original also did ``self.i = i`` against an undefined
        # name (it only worked by accident through a module-level global
        # and was never read), so that assignment has been removed.
        self.driver = None

    def download_file(self, item):
        """Attempt to download the full text of the currently open article.

        Clicks the ``#pdfDown`` button, follows the tab it opens, bails out
        if the click did not navigate or if a payment dialog appears, and
        otherwise asks the download page's own AJAX endpoint for the real
        file URL, which is stored into ``item['fullText']``.  Always hands
        focus back to the previous tab before returning.
        """
        item_url = self.driver.current_url
        # Trigger the download; normally this opens a new tab.
        self.driver.find_element(By.CSS_SELECTOR, '#pdfDown').click()
        time.sleep(1)
        self.driver.switch_to.window(self.driver.window_handles[-1])
        time.sleep(1)

        try:
            # Still on the article page -> the click did not navigate.
            if item_url == self.driver.current_url:
                print(item['documentName'], "下载失败，没有跳转下载也")
                return
        except Exception:
            # current_url itself can raise if the tab already disappeared.
            print(item['documentName'], "下载失败，没有跳转下载也")
            return

        try:
            # A visible ``#zhifu`` element means the article is paywalled.
            WebDriverWait(self.driver, 2).until(
                EC.visibility_of_element_located((By.CSS_SELECTOR,
                                                  '#zhifu'))
            )
            print(item['documentName'], "下载失败需要支付")
            time.sleep(1)
            self.driver.close()
            time.sleep(1)
            self.driver.switch_to.window(self.driver.window_handles[-1])
            return
        except Exception:
            # Timing out here is the *good* case: no payment dialog showed.
            print(item['documentName'], "开始下载")

        # Ask the download page's own confirmDownload endpoint for the file
        # URL.  ``BaseUrl``, ``cacheID`` and ``$`` are globals defined by
        # that page; execute_async_script resolves via the injected callback.
        downloadUrl = self.driver.execute_async_script("""
    var done = arguments[arguments.length - 1]; // 异步回调函数
    var res
        $.ajax({
            type: "post",
            url: BaseUrl.api + "/downloadFee/confimDownLoad",
            async: true,
            contentType: "application/json",
            data: JSON.stringify({
            "cacheId": cacheID,
            "lang": "",
        }),
            dataType: "json",
            success: function (result) {
                done(result); // 当AJAX请求成功时，调用done函数
            },
            error: function (xmlHttpRequest, textStatus, errorThrown) {
                done({code : 400}); // 当AJAX请求失败时，调用done函数
            }
        })
        """)
        if downloadUrl['code'] == 200000:
            # Success: record the absolute download URL on the item.
            item['fullText'] = "https:" + downloadUrl['data']['downloadUrl']
            print("下载成功")
        self.driver.close()
        time.sleep(0.3)
        self.driver.switch_to.window(self.driver.window_handles[-1])

    def getItemData(self, item, name):
        """Open the detail page behind ``name`` (a result-row link element),
        scrape its label/value metadata rows, attempt a full-text download,
        and save the record through the backend — unless the backend reports
        the accession number already exists.
        """
        # data == '0' means this accession number is not stored yet.
        res = requests.post(hostUrl + '/literature/getByAccessionNo',
                            json={'accessionNo': item['accessionNo']})
        if res.json().get('data') == '0':
            name.click()

            self.driver.switch_to.window(self.driver.window_handles[-1])
            try:
                # Wait for the detail rows to render.
                WebDriverWait(self.driver, 5).until(
                    EC.visibility_of_element_located((By.CSS_SELECTOR, '.row'))
                )
            except Exception:
                return
            item_content = self.driver.page_source
            try:
                soup = BeautifulSoup(item_content, 'html.parser')

                # Each ``.rowtit`` cell is a label; its next sibling holds
                # the value.  The trailing full-width colon is stripped.
                d = {}
                for row in soup.select('.rowtit'):
                    key = row.text.strip().replace('：', '')
                    value = row.find_next_sibling().text.strip()
                    d[key] = value

                # BUGFIX: the original ``d.pop('正文快照', d.pop('摘要', None))``
                # evaluated the inner pop eagerly, removing '摘要' from the
                # leftover infoData even when '正文快照' was present.
                if '正文快照' in d:
                    item['abstractInfo'] = d.pop('正文快照')
                else:
                    item['abstractInfo'] = d.pop('摘要', None)
                item['keywords'] = d.pop('关键词', None)
                item['doi'] = d.pop('DOI', None)
                # Whatever rows remain become free-form metadata.
                item['infoData'] = d
                # Best-effort full-text download; failure is non-fatal.
                try:
                    self.download_file(item)
                except Exception:
                    print('下载失败：')
            except Exception as e:
                print(e)
                traceback.print_exc()

            print(item['documentName'], "保存")
            time.sleep(1)
            try:
                # Close the detail tab (if any) and return to the list tab.
                self.driver.switch_to.window(self.driver.window_handles[-1])
                if len(self.driver.window_handles) > 1:
                    self.driver.close()
                    self.driver.switch_to.window(self.driver.window_handles[0])
            except Exception:
                print("关闭失败")
            requests.post(hostUrl + '/literature/save', json=item)
        else:
            print(item['documentName'], "该论文已存在")

    def arr_exec(self):
        """Launch Chrome, log into the proxy, run the fixed search, then walk
        result pages from ``item['skipNum']`` up to ``item['size']``, scraping
        every row.  Progress is written back into ``self.item['skipNum']`` so
        the caller can resume after a crash.
        """
        item = self.item
        chrome_options = webdriver.ChromeOptions()
        # Skip heavyweight resources to speed the crawl up.
        chrome_options.add_argument("--disable-images")
        chrome_options.add_argument("--disable-plugins")
        chrome_options.add_argument("--disable-extensions")
        chrome_options.add_argument("--start-maximized")
        chrome_options.add_experimental_option("prefs", {
            "download.default_directory": download_default_directory,
        })

        self.driver = webdriver.Chrome(options=chrome_options, service=Service(webdriverPath))
        # Log into the proxy site with the fixed account.
        self.driver.get('http://www.scihuber.com/e/member/login/')
        self.driver.find_element(By.ID, 'username').send_keys('917612')
        self.driver.find_element(By.ID, 'password').send_keys('02690773584')
        self.driver.find_element(By.CSS_SELECTOR, '#maincolumn .lBtn').click()
        time.sleep(1)
        # Navigate to the CNKI entry page behind the proxy.
        self.driver.get('http://www.scihuber.com/e/action/ShowInfo.php?classid=1&id=3029')

        # The search box id differs between page variants; try both.
        try:
            search_box = self.driver.find_element(By.CSS_SELECTOR, '#txt_SearchText')
        except Exception:
            search_box = self.driver.find_element(By.CSS_SELECTOR, '#txt_search')
        search_box.clear()
        search_box.send_keys('社会工作')
        search_box.send_keys(Keys.RETURN)
        time.sleep(3)

        skipNum = item['skipNum']
        if skipNum > 0:
            # Jump straight to the saved page: rewrite the pager link's
            # data-curpage attribute, then click it via JS.
            WebDriverWait(self.driver, 5).until(
                EC.visibility_of_element_located((By.CSS_SELECTOR,
                                                  '#briefBox > div:nth-child(2) > div:nth-child(1) > div.pages > div:nth-child(2) > a'))
            )
            skip = self.driver.find_element(By.CSS_SELECTOR,
                                            '#briefBox > div:nth-child(2) > div:nth-child(1) > div.pages > div:nth-child(2) > a')
            self.driver.execute_script("arguments[0].setAttribute('data-curpage', '" + str(skipNum) + "');", skip)
            self.driver.execute_script("arguments[0].click();", skip)
            time.sleep(3)

        # Walk the result pages.
        for i in range(skipNum, item['size']):
            item['skipNum'] = i
            # Wait for the result rows to render.
            WebDriverWait(self.driver, 5).until(
                EC.visibility_of_element_located((By.CSS_SELECTOR,
                                                  '#gridTable   table > tbody > tr'))
            )

            # Renamed from ``list`` to avoid shadowing the builtin.
            rows = self.driver.find_elements(By.CSS_SELECTOR, '#gridTable  table > tbody > tr')
            for tr in rows:
                name = tr.find_element(By.CSS_SELECTOR, 'td.name  a')
                d = {
                    'accessionNo': tr.find_element(By.CSS_SELECTOR, 'td.operat > a.icon-collect').get_attribute(
                        'data-filename'),
                    'type': 10,
                    'sourceWebsite': '知网',
                    'language': 'zh',
                    'datasource': '社会工作',
                    'documentName': name.text.strip(),
                    'url': name.get_attribute('href'),
                    'author': tr.find_element(By.CSS_SELECTOR, 'td.author').text.strip(),
                    'source': tr.find_element(By.CSS_SELECTOR, 'td.source').text.strip(),
                    'database': tr.find_element(By.CSS_SELECTOR, 'td.data').text.strip(),
                    'date': tr.find_element(By.CSS_SELECTOR, 'td.date').text.strip(),
                }
                self.getItemData(d, name)
                # Best-effort: make sure we are back on the list tab.
                try:
                    self.driver.switch_to.window(self.driver.window_handles[0])
                except Exception:
                    pass
            print("切换: ", i)
            # Too many stray tabs means the session is wedged; restart the
            # browser by bailing out (the caller loops arr_exec).
            if len(self.driver.window_handles) > 5:
                self.driver.quit()
                break
            self.driver.find_element(By.CSS_SELECTOR, '#Page_next_top').click()
            time.sleep(1)
            item['skipNum'] = i + 1

def arrThread(item):
    """Worker loop for one task slice.

    Keeps restarting the crawler until ``item['skipNum']`` has caught up
    with ``item['size']`` (the crawler advances skipNum in place), and
    checkpoints the module-global task list ``arr`` to data.json after
    every attempt so progress survives a crash.
    """
    while item['skipNum'] < item['size']:
        try:
            cnki(item).arr_exec()
        except Exception as exc:
            print(exc)
            traceback.print_exc()
        # Persist the current progress of all slices.
        with open("data.json", "w") as fp:
            json.dump(arr, fp)


if __name__ == "__main__":

    # One task slice per worker thread; skipNum/size bound each slice.
    arr = [
        {"skipNum": 1, "size": 60, "filename": '1.json'},
        {"skipNum": 61, "size": 120},
        {"skipNum": 121, "size": 180},
        {"skipNum": 181, "size": 240},
        {"skipNum": 241, "size": 300}
    ]
    # Seed the checkpoint file with the initial task state.
    with open("data.json", "w") as checkpoint:
        json.dump(arr, checkpoint)
    # Launch one worker per slice, staggered 40 s apart so the Chrome
    # instances do not all start at once.  (The loop variable must stay
    # named ``i``: cnki.__init__ reads it as a module global.)
    workers = []
    for i in arr:
        worker = threading.Thread(target=arrThread, args=(i,))
        workers.append(worker)
        worker.start()
        time.sleep(40)