import os
import requests
from lxml import etree
import multiprocessing
from multiprocessing import Pool, freeze_support
from functools import partial

# Default request headers for page fetches; the Referer matters because the
# image host rejects hot-linking without it.
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
    "Referer": "https://www.meitulu.com"}

# Program entry: target gallery URL. `id` is the album key between the last
# '/' and the last '-' (e.g. "290-6581"); the name shadows the builtin `id`
# but is kept because the sibling functions read this module global.
url = "https://www.shzx.org/a/290-6581-0.html"
id = url[url.rindex("/") + 1:url.rindex("-")]


def downloadimg(pages):
    """Download every image referenced by the given gallery pages.

    pages: list of gallery-page URLs; image URLs are scraped from each page
    via makeUrls/getImgUrl. Files are saved under k:/girls/<id>/ using the
    last path segment of the image URL as the filename. Bodies of 1 KiB or
    less are skipped (they are error/placeholder responses, not images).
    """
    # NOTE: was `dir`, which shadows the builtin; reads the module-level `id`.
    save_dir = "k:/girls/" + id + "/"
    os.makedirs(save_dir, exist_ok=True)

    print(pages)
    picurls = makeUrls(pages)
    for pic_url in picurls:
        download_headers = {
            "accept": "image/webp,image/apng,image/*,*/*;q=0.8",
            "cookie": 'cf_chl_1=a691491ac055bf6',
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9",
            "cache-control": "no-cache",
            "pragma": "no-cache",
            # The image host validates the Referer; sending the image URL
            # itself satisfies it. (The original encoded/decoded the string,
            # which was a no-op.)
            "referer": pic_url,
            'sec-fetch-dest': 'image',
            "sec-fetch-mode": "no-cors",
            "sec-fetch-site": "cross-site",
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
        }
        response = requests.get(pic_url, headers=download_headers, allow_redirects=False)
        imgname = pic_url.split('/')[-1]
        path = save_dir + imgname
        # Guard against saving empty/error bodies.
        if len(response.content) > 1024:
            with open(path, 'wb') as img:
                print("Download: " + path)
                img.write(response.content)


def makeUrls(urls):
    """Flatten the image URLs scraped from every page URL in `urls`.

    Returns a single list containing each image src, in page order.
    """
    # One flattening comprehension replaces the nested append loop.
    return [src for page_url in urls for src in getImgUrl(page_url)]


# print(makeUrls("234",14))

def getPages(url):
    """Fetch the first gallery page and build the list of all page URLs.

    Parses the pager text of the form "共N页..." from the div.paging anchor
    and returns page URLs 0..N built from the module-level `id`.
    """
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    anchors = html.xpath('//div[@class="paging"]/a/text()')

    # anchors[0] looks like "共N页"; the digits sit between index 1 and '页'.
    end = anchors[0].index('页')
    page_count = anchors[0][1:end]
    print("本页面有：" + page_count)

    urls = [f"https://www.shzx.org/a/{id}-{i}.html"
            for i in range(int(page_count) + 1)]
    print(urls)
    return urls


def getImgUrl(url):
    """Fetch one gallery page and return the src of every picture image.

    Only <img> tags inside div.picture are collected, so navigation and
    advert images elsewhere on the page are ignored.
    """
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    srcs = html.xpath('//div[@class="picture"]//img[@src]//@src')
    print(srcs)
    return srcs


def averageTask(l, n):
    """Split list `l` into at most `n` contiguous chunks of near-equal size.

    Returns a list of slices of `l`. An empty input yields [] (the original
    computed a chunk size of 0 and crashed in range() with step 0).
    """
    if not l:
        return []
    # Ceiling integer division — exact, unlike the old float-based size.
    chunk = -(-len(l) // n)
    return [l[i:i + chunk] for i in range(0, len(l), chunk)]



if __name__ == '__main__':
    freeze_support()
    # Worker pool of 6 processes; each worker downloads one chunk of pages.
    pool = Pool(6)

    pages = getPages(url)
    for chunk in averageTask(pages, 6):
        pool.apply_async(func=downloadimg, args=(chunk,))

    print('end')
    pool.close()
    pool.join()

# downloadimg(getImgUrl(str(49229)))
# getPages(str(49229))
# getImgUrl('172-4212-0')
# getPages('290-6567-0')
# id='290-6567-0'

# n=id.rindex('-')
# print(id[0:n+1])