# coding=utf-8
import os.path
import time
from urllib3 import PoolManager
from tqdm import tqdm
from lxml import etree
import threading
import re
from concurrent.futures import (
    ThreadPoolExecutor,
    wait,
    ALL_COMPLETED,
    FIRST_COMPLETED,
    as_completed,
)
import ssl
from random import choice
import urllib.request

# Globally disable TLS certificate verification so https requests do not fail
# on mirrors with invalid/self-signed certificates.
# NOTE(review): this weakens security for EVERY https request in the process.
ssl._create_default_https_context = ssl._create_unverified_context

# Tunnel-proxy endpoint and credentials.
# NOTE(review): credentials are hard-coded — consider env vars / a config file.
tunnel = "a901.kdltps.com:15818"
username = "t19744135387372"
password = "gx1874ki"

# Proxy map in the shape urllib's ProxyHandler expects:
# scheme -> "http://user:pwd@host:port/".
proxies = {
    "http": "http://%(user)s:%(pwd)s@%(proxy)s/"
    % {"user": username, "pwd": password, "proxy": tunnel},
    "https": "http://%(user)s:%(pwd)s@%(proxy)s/"
    % {"user": username, "pwd": password, "proxy": tunnel},
}

workPath = "/home/zjvis/crawler/"  # directory holding the split input files
basePath = "/mnt/data/crawler/"    # directory the downloaded PDFs are written to
cutPiece = 10000                   # number of lines per split input file
beginIndex = 0                     # unused below; presumably a resume offset — TODO confirm
maxWorkers = 10                    # thread-pool size
maxDelay = 5                       # seconds slept after each attempt (crude rate limit)
# Candidate sci-hub mirrors; one is picked at random per request.
baseUrl = [r"https://sci-hub.se/", r"http://sci-hub.st/", r"http://sci-hub.ru/"]

# Install a process-wide urllib opener that routes every urlopen() call
# through the proxy tunnel and presents a browser-like User-Agent.
proxy_support = urllib.request.ProxyHandler(proxies)
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [
    (
        'User-Agent',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36',
    )
]
urllib.request.install_opener(opener)


def getDownloadLink(downloadText):
    """Extract the PDF URL from a sci-hub save-button ``onclick`` attribute.

    Parameters
    ----------
    downloadText : str
        Onclick text such as ``location.href='//host/path.pdf?download=true'``.

    Returns
    -------
    str
        An absolute URL.  Protocol-relative links (``//host/...``) get an
        ``https:`` scheme; site-relative paths are joined with a randomly
        chosen mirror from ``baseUrl``.

    Raises
    ------
    ValueError
        If *downloadText* contains no single-quoted link.
    """
    # BUG FIX: the old greedy ``re.match(r".*\'(.*)\'.*")`` captured the LAST
    # quoted segment, broke on embedded newlines ('.' does not match them),
    # and raised an opaque AttributeError on no match.  Take the FIRST quoted
    # segment (the href target) and fail loudly when there is none.
    matchObj = re.search(r"'([^']*)'", downloadText)
    if matchObj is None:
        raise ValueError("no quoted download link in: {!r}".format(downloadText))
    rearText = matchObj.group(1)
    if rearText.startswith("//"):
        # Protocol-relative URL: only a scheme is missing.
        return r"https:{}".format(rearText)
    # Site-relative path: prefix with a random mirror.
    return r"{}{}".format(choice(baseUrl), rearText)


headers = {'User-Agent': 'Mozilla/5.0 3578.98 Safari/537.36'}


def download(fileName, index=0):
    """Download the PDF for every identifier listed in *fileName*.

    Each line of the input file is a sci-hub path fragment (e.g. a DOI).
    Successes are appended to the shared global ``fSuccess`` file and
    failures to ``fFailed``, both as ``"<index>^<line>"`` records; writes
    are serialised with the global ``lock``.

    Parameters
    ----------
    fileName : str
        Name of a split file under ``<workPath>/multiprocess``.
    index : int
        Starting output index; the PDF for the k-th line is saved as
        ``<basePath><index + k>.pdf``.
    """

    def downloadElement(line, idx):
        """Fetch one PDF.

        Returns 1 on success, 0 if the target file already exists (skip),
        -1 on any error.  Best-effort by design: errors are swallowed so a
        bad line never kills the whole batch.
        """
        try:
            outputPath = r"{}{}.pdf".format(basePath, idx)
            if os.path.exists(outputPath):
                # Already downloaded on a previous run — skip silently.
                return 0
            url = "{}{}".format(choice(baseUrl), line)
            req = urllib.request.Request(url, headers=headers)
            response = urllib.request.urlopen(req)
            html_str = response.read().decode("utf-8")
            html = etree.HTML(html_str)
            downloadButton = html.xpath(
                r"//button[contains(text(),'save') or contains(text(), '下载')]"
            )
            downloadLink = getDownloadLink(downloadButton[0].attrib.get("onclick"))
            req = urllib.request.Request(downloadLink, headers=headers)
            response = urllib.request.urlopen(req)
            # BUG FIX: urllib responses have no ``.content`` attribute (that
            # is the ``requests`` API).  The old ``response.content`` raised
            # AttributeError here, so every download landed in the failure
            # log.  ``read()`` returns the body bytes.
            with open(outputPath, 'wb') as outputF:
                outputF.write(response.read())
            time.sleep(maxDelay)  # crude rate limiting between hits
            return 1
        except Exception:
            time.sleep(maxDelay)
            return -1

    filePath = "{}multiprocess/{}".format(workPath, fileName)

    with open(filePath, "r", encoding="utf-8") as f:
        for line in tqdm(f, filePath):
            line = line.strip()
            flag = downloadElement(line, index)
            if flag == 1:
                # ``with lock`` instead of manual acquire/release: the lock
                # is now released even if a write raises.
                with lock:
                    print("{}^{}".format(index, line), file=fSuccess)
                    fSuccess.flush()
            elif flag == -1:
                with lock:
                    print("{}^{}".format(index, line), file=fFailed)
                    fFailed.flush()
            index += 1


# --- driver: fan the split input files out over a thread pool ---------------

fSuccess = open("succ.txt", 'w')  # shared success log ("<index>^<line>" records)
fFailed = open("error.txt", 'w')  # shared failure log
http = PoolManager()  # NOTE(review): never used below — dead object?
lock = threading.RLock()  # serialises writes to the two log files
threadPool = ThreadPoolExecutor(max_workers=maxWorkers)
allTask = []
for fileName in os.listdir("{}multiprocess".format(workPath)):
    # Split files are named "..._<N>.<ext>"; piece N covers global output
    # indices [cutPiece*(N-1), cutPiece*N).
    preIndex = int(fileName.split("_")[-1].split(".")[0])
    index = cutPiece * (preIndex - 1)
    task = threadPool.submit(download, fileName, index)
    allTask.append(task)
    # NOTE(review): this break means only the FIRST listed file is ever
    # processed — looks like a debug leftover; confirm before removing.
    break

wait(allTask, return_when=ALL_COMPLETED)
threadPool.shutdown(wait=True)
fSuccess.close()
fFailed.close()
