import os
import requests
from lxml import etree
import multiprocessing
from multiprocessing import Pool, freeze_support
from functools import partial
from PIL import Image

# Browser-like request headers for the HTML page scrapes below
# (the image downloads build their own headers per request).
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
    "Referer": "https://www.meitulu.com"}

# Program entry: the gallery id to download.
# NOTE(review): shadows the builtin `id`; left unchanged because the pool
# workers read it as a module-level global.
id = "33198"


def downloadimg(urls):
    """Download every image URL in *urls* into ``k:/girls/<id>/``.

    Runs inside a pool worker process; reads the module-level ``id``.
    Responses of 1 KiB or less are skipped — the server answers missing
    images with a small stub instead of a 404, so tiny bodies are not
    real pictures.
    """
    out_dir = "k:/girls/" + id + "/"

    # exist_ok=True: several pool workers hit this concurrently, and the
    # old exists()-then-makedirs() pattern raced (FileExistsError).
    os.makedirs(out_dir, exist_ok=True)

    # The site validates the Referer header per image. Everything except
    # Referer is loop-invariant, so build the dict once and patch it.
    download_headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Referer": "",
    }

    for url in urls:
        download_headers["Referer"] = "https://www.nvshens.net/img.html?img=" + url
        response = requests.get(url, headers=download_headers, allow_redirects=False)
        imgname = url.split('/')[-1]
        path = out_dir + imgname
        # Skip empty/stub files (see docstring).
        if len(response.content) > 1024:
            with open(path, 'wb') as img:
                print("Download: " + path)
                img.write(response.content)




def makeUrls(id, n):
    """Build the candidate image URLs for gallery *id*.

    *n* is the page count reported by the site; three images per page
    are assumed, so ``3 * n`` URLs are generated from the base URL
    scraped by ``getImgUrl``: ``<base>0.jpg`` (the first image has no
    zero padding), then ``<base>001.jpg``, ``<base>002.jpg``, ...

    :param id: gallery id string (shadows builtin `id`; kept for callers)
    :param n: number of pages in the gallery
    :return: list of image URL strings (empty when n == 0)
    """
    base = getImgUrl(id)  # network scrape for the image base URL
    total = n * 3
    # zfill(3) == rjust(3, '0') for pure digits: 1 -> "001".
    urls = [base + str(i).zfill(3) + ".jpg" for i in range(total)]
    # Image 0 is special-cased by the site: "0.jpg", not "000.jpg".
    # Guard so an empty gallery no longer raises IndexError.
    if urls:
        urls[0] = base + "0.jpg"
    return urls


# print(makeUrls("234",14))

def getPages(id):
    """Scrape the gallery's mobile page and return its page count.

    The count is returned as a string (callers convert with int()).
    """
    page_url = "https://m.nvshens.net/g/" + id + "/1.html"
    resp = requests.get(page_url, headers=headers, allow_redirects=False)
    doc = etree.HTML(resp.text)
    page_spans = doc.xpath('//span[@class="page"]/text()')
    print("本页面有：")
    # The span text reads "current/total"; keep the part after the slash.
    total = page_spans[0].rsplit('/', 1)[-1]
    print(total)
    return total


def getImgUrl(id):
    """Scrape the gallery page and derive the base URL for full-size images.

    Takes the first <img> src on the page and truncates it so callers can
    append a numbered filename ("001.jpg", ...) to get full-size images.
    """
    url = "https://www.nvshens.net/g/" + id + "/1.html"
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    result = html.xpath('//img/@src')
    n = result[0].rindex("/")
    # Slicing to n-1 (not n) is deliberate, not an off-by-one: it drops the
    # final thumbnail directory but keeps the preceding slash, e.g.
    # ".../32147/s/0.jpg" -> ".../32147/".
    # NOTE(review): this assumes the src always ends in a single-character
    # directory plus filename — confirm against the site's markup.
    url = result[0][0:n - 1]
    print(url)
    return url


def averageTask(l, n):
    """Split list *l* into at most *n* nearly-equal contiguous chunks.

    The chunk size is ceil(len(l) / n), so fewer than *n* chunks may be
    produced when the list is short. Returns [] for an empty list — the
    original formula made the chunk size 0, and range() then raised
    "ValueError: range() arg 3 must not be zero".

    :param l: sequence to split
    :param n: desired number of chunks (must be > 0)
    :return: list of slices of *l*, in order, covering every element
    """
    if not l:
        return []
    # Integer ceiling division: -(-a // b) == ceil(a / b), no float cast.
    size = -(-len(l) // n)
    return [l[i:i + size] for i in range(0, len(l), size)]


if __name__ == '__main__':
    freeze_support()
    # One worker process per CPU core.
    pool = Pool(multiprocessing.cpu_count())

    # Scrape the page count for the gallery configured at the top (`id`).
    nums = getPages(id)

    # Expand the page count into the full list of candidate image URLs.
    urls = makeUrls(id, int(nums))
    print("urls:",urls)
    # Split the URL list into 6 roughly-equal batches, one task per batch.
    tasks = averageTask(urls, 6)
    for i in tasks:
        # Fire-and-forget: each batch is downloaded by a pool worker;
        # queued tasks start as soon as a worker becomes free.
        pool.apply_async(func=downloadimg, args=(i,))

    print('end')
    pool.close()
    pool.join()

# getImgUrl(str(33198))