# coding=utf-8
import os.path
import time
import urllib3
from urllib3 import PoolManager, ProxyManager, Timeout
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from tqdm import tqdm
import shutil
import threading
import re
from concurrent.futures import (
    ThreadPoolExecutor,
    wait,
    ALL_COMPLETED,
    FIRST_COMPLETED,
    as_completed,
)
import ssl
from random import choice
from browser import Browser


# Globally disable TLS certificate verification so that fetching https
# pages does not raise SSL errors (sci-hub mirrors often present
# self-signed or mismatched certificates).
ssl._create_default_https_context = ssl._create_unverified_context
workPath = r"/home/zjvis/scihub/"  # project root: shard lists and logs live under here
basePath = r"/home/zjvis/scihub/data/"  # downloaded PDFs are written here as "<index>.pdf"
# Mirror pool; one is picked at random per request to spread the load.
baseUrl = [r"https://sci-hub.se/", r"http://sci-hub.st/", r"http://sci-hub.ru/"]

cutPiece = 10000  # lines per shard file; shard N starts at global index cutPiece * (N - 1)
maxWorkers = 10  # thread-pool size (one Browser per worker task)
maxDelay = 5  # seconds slept after each request — politeness / rate limiting

def getDownloadLink(downloadText):
    """Extract the absolute PDF download URL from a save-button's onclick text.

    downloadText typically looks like
    ``location.href='//sci-hub.se/downloads/....pdf?download=true'``.
    The single-quoted span is pulled out; a protocol-relative link
    ("//...") gets an ``https:`` scheme, while a site-relative path is
    joined to a randomly chosen mirror from ``baseUrl``.

    Raises ValueError (with the offending text) when no single-quoted
    link is present — the original code raised a cryptic AttributeError
    on the failed match instead.
    """
    matchObj = re.match(r".*\'(.*)\'.*", downloadText)
    if matchObj is None:
        raise ValueError(
            "no quoted download link in onclick text: {!r}".format(downloadText)
        )
    rearText = matchObj.group(1)
    if rearText.startswith("//"):
        return r"https:{}".format(rearText)
    else:
        return r"{}{}".format(choice(baseUrl), rearText)

def download(fileName, beginIndex=0):
    """Download one shard of papers through sci-hub with a dedicated browser.

    fileName   -- shard file under ``<workPath>/multiprocess``, one entry
                  (DOI / path) per line.
    beginIndex -- global index of the shard's first line; each PDF is
                  saved as ``<basePath><index>.pdf``.

    Runs as a thread-pool task. Indices of successful / failed lines are
    appended to the shared ``fSuccess`` / ``fFailed`` logs under the
    global ``lock``. The Browser instance is stopped when the shard is
    done.
    """

    def downloadElement(line, index):
        """Fetch a single paper.

        Returns 1 on success, 0 if the PDF already exists on disk,
        -1 on any failure (logged by the caller for later retry).
        """
        outputPath = r"{}{}.pdf".format(basePath, index)
        try:
            # Skip work a previous run already completed.
            if os.path.exists(outputPath):
                return 0
            url = "{}{}".format(choice(baseUrl), line)
            browser.item.get(url)

            downloadButtons = browser.item.find_elements(
                By.XPATH,
                r"//button[contains(text(),'save') or contains(text(), '下载')]",
            )

            # Bot detection: the save button did not render. Back off and
            # re-fetch the page before retrying. (The original code only
            # slept, so the retry below re-queried the same stale DOM.)
            if len(downloadButtons) == 0:
                time.sleep(maxDelay + 2)
                browser.item.get(url)

            downloadButton = browser.item.find_element(
                By.XPATH,
                r"//button[contains(text(),'save') or contains(text(), '下载')]",
            )
            downloadLink = getDownloadLink(downloadButton.get_attribute("onclick"))
            # Stream the PDF straight to disk without buffering it in memory.
            with http.request(
                'GET', downloadLink, preload_content=False
            ) as res, open(outputPath, 'wb') as out:
                shutil.copyfileobj(res, out)
            time.sleep(maxDelay)  # rate-limit between successful fetches
            return 1
        except Exception:
            # Best-effort recovery: abort any hung page load.
            browser.item.execute_script('window.stop()')
            # A truncated PDF from a failed stream would be treated as
            # complete by the exists() check on the next run — remove it
            # so the download can actually be retried.
            if os.path.exists(outputPath):
                os.remove(outputPath)
            time.sleep(maxDelay)
            return -1

    browser = Browser()

    filePath = "{}multiprocess/{}".format(workPath, fileName)

    index = beginIndex
    with open(filePath, "r", encoding="utf-8") as f:
        for line in tqdm(f, filePath):
            line = line.strip()
            flag = downloadElement(line, index)
            if flag == 1:
                # Serialize writes to the shared success log.
                with lock:
                    print("{}^{}".format(index, line), file=fSuccess)
                    fSuccess.flush()
            elif flag == -1:
                with lock:
                    print("{}^{}".format(index, line), file=fFailed)
                    fFailed.flush()
            index += 1
    browser.stopItem()


# Shared log files for successful / failed downloads; worker threads
# write to them under `lock`.
# NOTE(review): mode 'w' truncates both logs on every restart — confirm
# that losing the previous run's records is intended.
fSuccess = open("{}log/succ.txt".format(workPath), 'w')
fFailed = open("{}log/error.txt".format(workPath), 'w')
# Connection/read timeouts shared by every PDF request.
timeout = Timeout(connect=30, read=60)
# Alternative: route requests through a paid proxy pool instead:
# http = ProxyManager("http://a901.kdltps.com:15818", timeout=timeout)
http = PoolManager(timeout=timeout)
lock = threading.RLock()  # guards the shared log files across workers
threadPool = ThreadPoolExecutor(max_workers=maxWorkers)
allTask = []  # futures for every submitted shard, awaited below

# Submit up to five shard files to the pool. A shard is named
# "..._<N>.<ext>"; shard N covers global indices starting at
# cutPiece * (N - 1), which is passed to download() as beginIndex.
for shardName in os.listdir("{}multiprocess".format(workPath))[:5]:
    shardNumber = int(shardName.split("_")[-1].split(".")[0])
    startIndex = cutPiece * (shardNumber - 1)
    allTask.append(threadPool.submit(download, shardName, startIndex))


# Block until every shard worker has finished, then release resources.
wait(allTask, return_when=ALL_COMPLETED)
threadPool.shutdown(wait=True)
fSuccess.close()
fFailed.close()
