import os
import requests
from lxml import etree
import multiprocessing
from multiprocessing import Pool, freeze_support
from functools import partial
import time
# Browser-like headers for listing-page requests (Referer required by the site).
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
    "Referer": "https://www.naixue.org/"}

# Gallery id to download (program entry point); used to build every page URL.
# NOTE(review): `id` shadows the builtin — consider renaming (would touch all callers).
id = "56239"


def downloadimg(pages):
    """Download every image referenced by the given listing pages.

    Args:
        pages: iterable of listing-page URLs; each page is scraped for
            image URLs via makeUrls/getImgUrl.

    Side effects:
        Creates the target directory (keyed by the module-level ``id``)
        if missing, and writes each image whose payload exceeds 1 KiB —
        smaller responses are treated as error/placeholder pages and
        skipped.
    """
    save_dir = "k:/girls/naixue/" + id + "/"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # The request headers never change between images — build them once
    # instead of rebuilding the dict on every loop iteration.
    download_headers = {
        "accept": "image/webp,image/apng,image/*,*/*;q=0.8",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9",
        "cache-control": "no-cache",
        "cookie": 'cf_chl_1=a691491ac055bf6',
        "pragma": "no-cache",
        "referer": 'https://www.naixue.org/45228.html',
        'sec-fetch-dest': 'image',
        "sec-fetch-mode": "no-cors",
        "sec-fetch-site": "cross-site",
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
    }

    for url in makeUrls(pages):
        # allow_redirects=False: a redirect from the image host means we
        # did not get the real image, so don't follow it.
        response = requests.get(url, headers=download_headers, allow_redirects=False)
        imgname = url.split('/')[-1]
        path = save_dir + imgname
        # Skip tiny payloads — they are error pages, not images.
        if len(response.content) > 1024:
            print("Download: " + path)
            with open(path, 'wb') as img:
                img.write(response.content)


def makeUrls(urls):
    """Collect every image URL found on each listing page in *urls*.

    Args:
        urls: iterable of listing-page URLs.

    Returns:
        Flat list of image source URLs, in page order.
    """
    return [pic for page in urls for pic in getImgUrl(page)]


# print(makeUrls("234",14))

def getPages(url):
    """Return the URLs of every page of the current gallery.

    Fetches the gallery's first page, reads the highest page number from
    the pagination links, and generates the URLs for pages 2..N.

    NOTE(review): the ``url`` parameter is ignored — the URL is always
    rebuilt from the module-level ``id``. Confirm callers only ever pass
    that same id before changing this.

    Returns:
        List of page URLs, starting with the first page.
    """
    url = "https://www.naixue.org/" + id + ".html"
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    page_links = html.xpath('//a[@class="page-numbers"]/text()')
    urls = [url]
    # A single-page gallery has no pagination links at all — previously
    # this raised IndexError on page_links[-1].
    if not page_links:
        return urls
    # The last pagination link carries the highest page number.
    # (The old encode("utf-8")/decode round-trip was a no-op and is gone.)
    num = int(page_links[-1])
    print("本页面有：")
    print(num)
    for i in range(2, num + 1):
        urls.append("https://www.naixue.org/" + id + "_" + str(i) + ".html")
    print(urls)
    return urls


def getImgUrl(url):
    """Scrape one listing page and return the image source URLs found on it.

    Args:
        url: listing-page URL to fetch.

    Returns:
        List of ``src`` attribute values found under the page's
        image-box anchors.
    """
    page = requests.get(url, headers=headers, allow_redirects=False)
    tree = etree.HTML(page.text)
    srcs = tree.xpath('//a[@class="imageclick-imgbox"]//@src')
    print(srcs)
    return srcs


def averageTask(l, n):
    """Split *l* into at most *n* contiguous chunks of near-equal size.

    The chunk size is ceil(len(l) / n), so the final chunk may be shorter
    and fewer than *n* chunks may be produced for short inputs. Uses pure
    integer ceiling division instead of the old float divide-and-cast.

    Args:
        l: list to partition.
        n: desired number of chunks (must be > 0).

    Returns:
        List of sub-lists covering *l* in order; [] for an empty input
        (the old code raised ValueError via a zero range step).
    """
    if not l:
        return []
    # Ceiling division without float rounding: equivalent to the original
    # int(len/n) / int(len/n + 1) logic for every non-empty input.
    size = (len(l) + n - 1) // n
    return [l[i:i + size] for i in range(0, len(l), size)]



if __name__ == '__main__':
    # Required on Windows when the script is frozen into an executable.
    freeze_support()
    pool = Pool(6)  # process pool: up to 6 download workers run in parallel


    pages = getPages(id)

    # urls = makeUrls(pages)
    #
    # print("Total imgs:",urls)
    # Split the page list into 6 chunks, one per worker process.
    tasks=averageTask(pages,6)
    for i in tasks:

        pool.apply_async(func=downloadimg,args=(i,)) # each chunk is handled by one worker; queued tasks start as workers become free


    print('end')
    pool.close()
    pool.join()

# downloadimg(getImgUrl(str(45245)))
# getPages(str(49229))
