import os
import requests
from lxml import etree
import multiprocessing
from multiprocessing import Pool, freeze_support
from functools import partial
from PIL import Image

# Browser-like headers for the HTML page fetches (the site rejects bare clients).
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
    "Referer": "https://www.meitulu.com"}

# Program entry parameters.
# NOTE(review): `id` shadows the builtin id(); it is the gallery id to download.
id = "33437"
# CDN gallery-directory id (e.g. "27559"); filled in by getPagesnew().
tkurl=""

def downloadimg(urls):
    """Download every image URL in *urls* into d:/girls/<id>/.

    The file name is the last path segment of each URL. Responses other
    than HTTP 200 (e.g. hotlink-protection redirects, since redirects are
    not followed) are skipped so error pages are not saved as images.

    Args:
        urls: iterable of absolute image URL strings.
    """
    save_dir = "d:/girls/" + id + "/"
    # exist_ok avoids a race when several pool workers create the dir at once.
    os.makedirs(save_dir, exist_ok=True)

    # The CDN checks the Referer header; build the invariant parts once,
    # outside the loop, and only rewrite the Referer per image.
    download_headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36",
        "Referer": "",
    }

    for url in urls:
        # Anti-hotlinking expects the referer to point at the image viewer page.
        download_headers["Referer"] = "https://www.nvshens.org/img.html?img=" + url
        response = requests.get(url, headers=download_headers, allow_redirects=False)
        if response.status_code != 200:
            print("Skip " + url + " (HTTP " + str(response.status_code) + ")")
            continue

        path = os.path.join(save_dir, url.split('/')[-1])
        with open(path, 'wb') as img:
            img.write(response.content)
        print("Download " + url)




def makeUrls(id, n, tk=None):
    """Build the list of *n* image URLs for gallery *id*.

    Image 0 is unpadded on the server ("0.jpg"); images 1..n-1 are
    zero-padded to three digits ("001.jpg", "002.jpg", ...).

    Args:
        id: gallery id (path segment on the CDN).
        n:  number of images in the gallery.
        tk: CDN gallery-directory id; defaults to the module-level
            ``tkurl`` (set by getPagesnew) for backward compatibility.

    Returns:
        List of n URL strings; an empty list when n <= 0 (the previous
        version raised IndexError for n == 0).
    """
    gallery = tkurl if tk is None else tk
    base = "https://t1.onvshen.com:85/gallery/" + gallery + "/" + id + "/"
    urls = [base + str(i).rjust(3, '0') + ".jpg" for i in range(n)]
    if urls:
        # The first image's file name is not zero-padded.
        urls[0] = base + "0.jpg"
    return urls


# print(makeUrls("234",14))

def getPages(id):
    """Fetch page 1 of gallery *id* from the mobile site and return the
    total page count (as a string) parsed from the pager span."""
    page_url = "https://m.nvshens.net/g/" + id + "/1.html"
    resp = requests.get(page_url, headers=headers, allow_redirects=False)
    tree = etree.HTML(resp.text)
    # Pager text looks like "current/total"; the last '/'-separated field
    # is the total page count.
    pager_texts = tree.xpath('//span[@class="page"]/text()')
    print("本页面有：")
    total = pager_texts[0].split('/')[-1]
    print(total)
    return total


def getImgUrl(id):
    """Fetch page 1 of gallery *id* and derive the image base URL from the
    first <img> src found on the page."""
    url = "https://www.nvshens.net/g/" + id + "/1.html"
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    # All img src attributes on the page; the first is the gallery thumbnail.
    result = html.xpath('//img/@src')
    # Index of the last '/' (just before the file name).
    n = result[0].rindex("/")
    # NOTE(review): [0:n - 1] drops one extra character before the last '/'.
    # For a src like ".../33413/s/0.jpg" this strips the single-character "s"
    # thumbnail directory, yielding ".../33413/" — but it only works because
    # that segment is exactly one character. Possible off-by-one; confirm
    # intent before reusing (function appears unused: see commented-out call
    # at the bottom of the file).
    url = result[0][0:n - 1]
    print(url)
    return url


def averageTask(l, n):
    """Split list *l* into chunks of ceil(len(l)/n) items each.

    Yields at most n chunks of near-equal size; the last chunk may be
    shorter. Fixes two defects of the original: an empty list raised
    ``ValueError: range() arg 3 must not be zero`` (step of 0), and the
    chunk size was computed with float division, which loses precision
    for very large lists.

    Args:
        l: list to split.
        n: desired number of chunks (> 0).

    Returns:
        List of list slices covering *l* in order; [] when *l* is empty.
    """
    if not l:
        return []
    # Integer ceiling division: equivalent to the old int(len/n [+ 1])
    # formula but exact for any list length.
    chunk = -(-len(l) // n)
    return [l[m:m + chunk] for m in range(0, len(l), chunk)]


def getPagesnew(id):
    """Fetch page 1 of gallery *id*, print image stats, cache the CDN
    gallery-directory id in the module-level ``tkurl`` global, and return
    the total image count."""
    global tkurl

    page_url = "https://www.nvshens.org/g/" + id + "/1.html"
    resp = requests.get(page_url, headers=headers, allow_redirects=False)
    tree = etree.HTML(resp.text)

    # The red span holds the image count followed by a 3-character suffix
    # (presumably "张图片"); strip it to get the bare number.
    count_text = tree.xpath('//span[@style="color: #DB0909"]/text()')[0][:-3]
    print(count_text)
    imgNums = int(count_text)
    # Three thumbnails per page on this layout — TODO confirm.
    print("本页面有：" + str(round(imgNums / 3)))

    # First gallery image src, e.g. ".../gallery/27559/33413/s/0.jpg";
    # the 4th-from-last path segment ("27559") is the CDN directory id.
    first_src = tree.xpath('//ul[@id="hgallery"]/img[1]/@src')[0]
    tkurl = first_src.split('/')[-4]
    print("图库URL：" + tkurl)

    return imgNums

if __name__ == '__main__':
    # Required on Windows when the script is frozen into an executable.
    freeze_support()
    # One worker process per CPU core.
    pool = Pool(multiprocessing.cpu_count())

    # Scrape page 1: learn the image count and populate the global tkurl.
    nums = getPagesnew(id)

    urls = makeUrls(id, nums)
    print("urls:",urls)
    # Split the URL list into up-to-6 near-equal chunks, one async task each.
    tasks = averageTask(urls, 6)
    for i in tasks:
        pool.apply_async(func=downloadimg, args=(i,))  # queue chunk on the pool

    # NOTE(review): printed before the workers finish; join() below blocks
    # until all downloads complete.
    print('end')
    pool.close()
    pool.join()

# getImgUrl(str(33198))