import os
import requests
from lxml import etree
import multiprocessing
from multiprocessing import Pool,freeze_support
import numpy as np
##headers = {'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36", "Referer": "https://www.meitulu.com"}
headers = {'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36", "Referer": "https://www.meitulu.com"}

def downloadimg(listurl):
    """Download every image URL in *listurl* into k:/girls/<set-id>/.

    The destination folder name is taken from the second-to-last path
    segment of the first URL, so an empty list is a no-op (the original
    raised IndexError on an empty list).
    """
    if not listurl:
        return
    dirname = os.path.join("k:/girls", listurl[0].split('/')[-2])
    os.makedirs(dirname, exist_ok=True)  # create the set folder if missing
    ## https://www.meitulu.com/img.html?img=https://mtl.xtpxw.com/images/img/13586/1.jpg

    # The site validates the Host header, so it is pinned here (hoisted out
    # of the loop — it is identical for every request).
    # NOTE(review): hard-codes mtl.gzhuibei.com as the image host — confirm
    # all image URLs in listurl actually live on that host.
    download_headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Host": "mtl.gzhuibei.com"
        }

    for url in listurl:
        response = requests.get(url, headers=download_headers, allow_redirects=False)
        path = os.path.join(dirname, url.split('/')[-1])  # file name = last URL segment
        with open(path, 'wb') as img:
            img.write(response.content)

def getimgurls(url):
    """Return the image src URLs found on one gallery page (//center/img).

    Fetches *url* with the module-level browser headers and parses the
    response with lxml.
    """
    #url = 'https://www.meitulu.com/item/15099.html'
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    # Evaluate the XPath once (the original ran it twice: once to print,
    # once to assign).
    img_urls = html.xpath("//center/img/@src")
    print(img_urls)
    return img_urls

def getpageurls(url):
    """Return absolute URLs for every pagination link found on *url*.

    NOTE(review): this definition is shadowed by an identical duplicate
    later in the file; Python keeps only the last one, so this copy is
    dead code and one of the two should be removed.
    """
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    # Evaluate the XPath once (the original ran it twice: print + assign).
    hrefs = html.xpath("//center/div[@id='pages']/a/@href")
    print(hrefs)

    # The hrefs are site-relative; prefix the domain to make them absolute.
    pageurls = ['https://www.meitulu.com' + href for href in hrefs]
    print(pageurls)
    return pageurls

def getnewpageurls(url):
    """Build the full list of page URLs for a single gallery item.

    Reads the pagination anchors on *url*; the second-to-last anchor text
    is taken as the highest page number, from which page URLs of the form
    https://www.meitulu.com/item/<id>_<n>.html are generated.  A page with
    no (or too few) pagination anchors yields just [url].
    """
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    # Evaluate the XPath once (the original ran it twice: print + assign).
    pages = html.xpath("//center/div[@id='pages']/a/text()")
    print(pages)

    pageurls = [url]  # page 1 is the item URL itself
    # Guard: pages[-2] needs at least two anchors (the original indexed
    # pages[-2] whenever the list was non-empty, which could raise
    # IndexError on a single-anchor pagination block).
    if len(pages) >= 2:
        # NOTE(review): assumes the second-to-last anchor text is the last
        # page number — verify against the live site markup.
        maxpage = pages[-2]
        item_id = url.split('/')[-1].split('.')[0]
        print(item_id)
        for n in range(2, int(maxpage) + 1):
            pageurls.append("https://www.meitulu.com/item/" + item_id + '_' + str(n) + ".html")
    print(pageurls)
    return pageurls

def download(urls):
    """Fetch the image list for each page URL in *urls* and download it."""
    for page_url in urls:
        downloadimg(getimgurls(page_url))

def getpageurls(url):
    """Return absolute URLs for every pagination link found on *url*.

    NOTE(review): this is a byte-identical duplicate of an earlier
    definition in this file — Python keeps only this last copy; the
    earlier one should be deleted.
    """
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    # Evaluate the XPath once (the original ran it twice: print + assign).
    hrefs = html.xpath("//center/div[@id='pages']/a/@href")
    print(hrefs)

    # The hrefs are site-relative; prefix the domain to make them absolute.
    pageurls = ['https://www.meitulu.com' + href for href in hrefs]
    print(pageurls)
    return pageurls

def getCatageloueUrls(url):
    """Return the start URL of each photo set listed on one category page."""
    response = requests.get(url, headers=headers, allow_redirects=False)
    page = etree.HTML(response.text)
    set_urls = page.xpath("//ul[@class='img']/li/a/@href")
    print(set_urls)
    return set_urls

def getCatageloueTotalUrls(url):
    """Build all page URLs for an item, persist them with saveUrls, and return them.

    NOTE(review): shadowed by a later definition of the same name in this
    file — Python keeps only the last one, so this version is dead code.
    """
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    # Pagination anchor texts; the second-to-last is the highest page number.
    pages = html.xpath("//center/div[@id='pages']/a/text()")

    pageurls = [url]  # the first page is always included
    # Guard: the original indexed pages[-2] unconditionally and raised
    # IndexError on pages without a pagination block.
    if len(pages) >= 2:
        maxpage = pages[-2]
        t = url.split('/')[-1].split('.')[0]
        print(t)
        for n in range(2, int(maxpage) + 1):
            pageurls.append("https://www.meitulu.com/item/" + t + '_' + str(n) + ".html")

    print(pageurls)
    saveUrls(pageurls)
    # BUG FIX: the original returned the undefined name `urls`, which
    # raised NameError at runtime; the computed list is what callers need.
    return pageurls

def averageTask(l, n):
    """Split list *l* into at most *n* roughly equal contiguous chunks.

    Returns a list of sub-lists.  An empty input yields [] — the original
    computed a chunk size of 0 and raised
    ``ValueError: range() arg 3 must not be zero``.
    """
    if not l:
        return []
    # Ceiling division replaces the original float-divide-then-int trick.
    length = -(-len(l) // n)
    return [l[m:m + length] for m in range(0, len(l), length)]

def task(urls):
    """Download the complete gallery behind each start URL in *urls*."""
    for start_url in urls:
        pages = getnewpageurls(start_url)
        download(pages)



def saveUrls(urls):
    """Persist *urls* to 'urls.npy' in the current working directory."""
    np.save('urls', urls)

def loadUrls():
    """Load the URL array previously written by saveUrls from 'urls.npy'."""
    # The original created an unused local list here; removed as dead code.
    return np.load('urls.npy')

def getCatageloueTotalUrls(url):
    """Collect every photo-set start URL across all pages of one category.

    Builds the paginated category URLs
    (https://www.meitulu.com/t/<category>/<n>.html) from the highest page
    number found on the first page, then gathers the set links from each
    page via getCatageloueUrls.

    NOTE(review): this shadows an earlier definition of the same name;
    Python keeps this copy, so the earlier one should be deleted.
    """
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    # Pagination anchor texts; the second-to-last is the highest page number.
    pages = html.xpath("//center/div[@id='pages']/a/text()")

    pageurls = [url]  # the first page is always included (the original
                      # appended it in both branches, via a redundantly
                      # re-initialized list)
    # Guard: the original tested len != 0 but still indexed pages[-2],
    # which raises IndexError when exactly one anchor exists.
    if len(pages) >= 2:
        maxpage = pages[-2]
        category = url.split('/')[-2] + "/"
        print(pageurls)
        for n in range(2, int(maxpage) + 1):
            pageurls.append("https://www.meitulu.com/t/" + category + str(n) + ".html")
        print(pageurls)

    urlinits = []
    for page_url in pageurls:
        urlinits += getCatageloueUrls(page_url)
    return urlinits



def mytask(urls):
    """Worker body: expand each start URL into its pages, then download all images."""
    # First pass: collect every paginated page URL for the given items.
    page_urls = []
    for start_url in urls:
        page_urls.extend(getnewpageurls(start_url))
    # Second pass: pull the image list from each page and save the files.
    for page_url in page_urls:
        print("download " + page_url)
        downloadimg(getimgurls(page_url))

if __name__ == '__main__':
    # Entry point: paginate one gallery item, split its page URLs into up
    # to 20 chunks, and hand each chunk to a worker process.
    freeze_support()
    # Pool sized to the machine's CPU count.  (The original comment claimed
    # a fixed pool of 3; the code actually uses cpu_count().)
    pool = Pool(multiprocessing.cpu_count())

    #urls = getCatageloueUrls('https://www.meitulu.com/t/ligui/')
    '''
        urls = getCatageloueUrls('https://www.meitulu.com/t/jipin/')
    urls = getCatageloueUrls('https://www.meitulu.com/t/taiwanmeinv/2.html')
    '''

    #urls = getCatageloueTotalUrls('https://www.meitulu.com/t/1400/')
   # urls = getCatageloueUrls('https://www.meitulu.com/item/20710.html')
    urls=getnewpageurls('https://www.meitulu.com/item/21616.html')

    tasks=averageTask(urls,20)
    for i in tasks:
        # apply_async queues the chunk; an idle pool process picks it up as
        # soon as one becomes free.
        pool.apply_async(func=mytask,args=(i,))
        # pool.apply(func=Foo,args=(i,))

    # NOTE: printed before join(), so "end" appears while workers may still
    # be downloading.
    print('end')
    pool.close()
    # close() must be called before join(): after close() no new tasks can
    # be submitted, and join() waits for all worker processes to finish.
    pool.join()
