import os
import requests
from lxml import etree
import multiprocessing
from multiprocessing import Pool, freeze_support
import numpy as np

# Old desktop-site headers kept for reference:
# headers = {'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36", "Referer": "https://www.nvshens.org"}

# Browser-like request headers shared by all page fetches below.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    # NOTE: "br" removed — requests only decodes Brotli when the optional
    # brotli package is installed; advertising it risks undecodable bodies.
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    # "Cookie": "Hm_lvt_0f1f5a5e4d9fc6dc0f52ab6f2ec45893Hm_lvt_0f1f5a5e4d9fc6dc0f52ab6f2ec45893=1596978645,1597022664,1597049777; Hm_lpvt_0f1f5a5e4d9fc6dc0f52ab6f2ec45893=1597053804",
    # NOTE: hard-coded "Host": "m.quantuwang.cc" removed — every request in
    # this file targets www.nvshens.org / m.nvshens.net, so forcing a foreign
    # Host header contradicts the URL. requests derives Host from the URL.
    "Upgrade-Insecure-Requests": "1",
    'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36",
}
# Example <img> tags this scraper targets:
# <img src="http://d.quantuwang.co/2021/01/07/23805/53.jpg" alt="[IMISS]爱蜜社 VOL.540 Vanessa(53)">
# <img src="http://d.quantuwang.co/2020/12/21/23604/1.jpg" alt="[IMISS]爱蜜社 VOL.538 Vanessa(2)">

# Gallery token (e.g. "27559" in .../gallery/27559/33413/...), filled in by
# getPagesnew() and consumed by makeUrls().
tkurl = ""

def downloadimg(id):
    """Download every image of gallery *id* into d:/girls/<id>/.

    Looks up the image count and gallery token via getPagesnew(), builds
    the per-image URLs with makeUrls(), then fetches each image with a
    matching Referer header (the site rejects referer-less requests).

    Parameters:
        id: gallery id as a string (used verbatim in paths and URLs).
    Side effects:
        Creates the output directory if needed and writes one file per image.
    """
    out_dir = "d:/girls/" + id + "/"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # e.g. https://www.meitulu.com/img.html?img=https://mtl.xtpxw.com/images/img/13586/1.jpg
    num_imgs = getPagesnew(id)
    urls = makeUrls(id, num_imgs)
    print(urls)

    # Build the headers once (hoisted out of the loop — the original
    # rebuilt this dict for every image) and only swap the Referer per URL.
    download_headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36",
        "Referer": "",
    }
    for url in urls:
        download_headers["Referer"] = "https://www.nvshens.org/img.html?img=" + url
        response = requests.get(url, headers=download_headers, allow_redirects=False)
        if response.status_code != 200:
            # Don't save error pages / redirect bodies as image files.
            print("skip " + url + " (status " + str(response.status_code) + ")")
            continue
        imgname = url.split('/')[-1]
        path = out_dir + imgname
        with open(path, 'wb') as img:
            img.write(response.content)


def makeUrls(id, n):
    """Build the *n* image URLs of gallery *id*.

    Image 0 has the special unpadded name "0.jpg"; every other index i is
    zero-padded to three digits ("001.jpg", "002.jpg", ...). Relies on the
    module-global `tkurl` (gallery token) set beforehand by getPagesnew().

    Parameters:
        id: gallery id string.
        n:  number of images.
    Returns:
        list[str] of n URLs; [] when n == 0 (the original version raised
        an IndexError on the urls[0] assignment in that case).
    """
    base = "https://t1.onvshen.com:85/gallery/" + tkurl + "/" + id + "/"
    urls = [base + str(i).rjust(3, '0') + ".jpg" for i in range(n)]
    if urls:
        # The first image is not zero-padded on the server.
        urls[0] = base + "0.jpg"
    return urls


# print(makeUrls("234",14))




def getPagesnew(id):
    """Fetch page 1 of gallery *id* on the desktop site and return its image count.

    Parameters:
        id: gallery id as a string (used verbatim in the URL).
    Returns:
        int: image count parsed from the red-highlighted span's text.
    Side effects:
        Sets the module-global `tkurl` (the gallery token consumed by
        makeUrls) and prints diagnostics.
    """
    url = "https://www.nvshens.org/g/" + id + "/1.html"
    response = requests.get(url, headers=headers, allow_redirects=False)
    html = etree.HTML(response.text)
    # The image count is the text of the span styled in red (#DB0909).
    n = html.xpath('//span[@style="color: #DB0909"]/text()')
    # Drop the last 3 characters — presumably a non-numeric unit suffix
    # after the count; TODO confirm against the live page markup.
    t=n[0][:-3]
    print(t)
    imgNums=int(t)
    # Diagnostic only (not returned); looks like images-per-row is 3,
    # making this a rough row count — unverified.
    num=round(imgNums/3)
    print("本页面有：" +str(num) )

    aa=html.xpath('//ul[@id="hgallery"]/img[1]/@src')
    # Extract the gallery token, e.g. "27559" out of
    # "https://t1.onvshen.com:85/gallery/27559/33413/s/0.jpg".
    global tkurl
    tkurl=aa[0].split('/')[-4]

    print("图库URL："+tkurl)
    return imgNums

def getPages(id):
    """Return the page count of mobile gallery *id* as a string.

    Fetches the gallery's index page and reads the '<span class="page">'
    text (e.g. "1/12"), returning the part after the last '/'.
    """
    page_url = "https://m.nvshens.net/g/" + id + "/"
    # alternate form: "https://m.nvshens.net/g/" + id + "/1.html"
    resp = requests.get(page_url, headers=headers, allow_redirects=False)
    tree = etree.HTML(resp.text)
    print(tree)
    spans = tree.xpath('//span[@class="page"]/text()')
    print("本页面有：")
    page_count = spans[0].rsplit('/', 1)[-1]
    print(page_count)
    return page_count

def getImgUrl(id):
    """Fetch page 1 of mobile gallery *id*.

    Prints the src of every <img> on the page and returns the list of
    'ck-parent-div' div elements found there.
    """
    page_url = "https://m.nvshens.net/g/" + id + "/1.html"
    resp = requests.get(page_url, headers=headers, allow_redirects=False)
    tree = etree.HTML(resp.text)
    img_srcs = tree.xpath('//img/@src')
    divs = tree.xpath('//div[@class ="ck-parent-div"]')
    print(img_srcs)
    return divs


# Entry point. Guarded so that importing this module no longer triggers a
# full network download (the original called downloadimg at import time).
if __name__ == "__main__":
    # getPages("33413")
    downloadimg("33447")
    # getImgUrl("33420")
    # getPagesnew("33")
