import threading
import time
import requests
from bs4 import BeautifulSoup

# Index page listing every gallery on the site (used by the commented-out driver below).
url = "http://www.mzitu.com/all"
# A single hard-coded gallery URL; the live driver below scrapes this one gallery.
pic = "http://www.mzitu.com/108528"

def downHtml(url):
    """Fetch *url* and return the response body as text.

    The Referer header is required: the site rejects requests that do not
    appear to come from one of its own pages (hotlink protection).
    """
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240",
        'Connection': 'Keep-Alive',
        'Referer': "http://www.mzitu.com/99566"
    }
    # timeout so a stalled server cannot hang the scraper forever
    return requests.get(url, headers=headers, timeout=10).text

def parseHtml(html):
    """Print every gallery link (href + title) found on the index page.

    Returns the number of links found.  Bug fix: the original declared a
    ``count`` "to count the links" but never incremented it, so it always
    printed 0.
    """
    soup = BeautifulSoup(html, "lxml")
    # renamed from ``list`` to avoid shadowing the builtin
    anchors = soup.select("a[target='_blank']")
    count = 0
    for a in anchors:
        print(a["href"], a.get_text())
        count += 1  # was missing in the original — count stayed at 0
    print(count)
    return count


def get_pic_num(html):
    """Return the total number of pages in a gallery.

    Parses the ``div.pagenavi`` pagination bar; the second-to-last <span>
    holds the highest page number (the last one is the "next page" control).
    """
    soup = BeautifulSoup(html, "lxml")
    # renamed from ``list`` to avoid shadowing the builtin
    spans = soup.find("div", class_="pagenavi").find_all("span")
    return int(spans[-2].string)

def get_pic(html):
    """Find the main image on a gallery page, print its URL and alt text,
    then download it via :func:`download_pic`."""
    images = BeautifulSoup(html, "lxml").find("div", class_="main-image").find_all("img")
    first = images[0]
    src, alt = first["src"], first["alt"]
    print(src, alt)
    download_pic(src)

def download_pic(url):
    """Download the image at *url* and save it as ``<millis>.jpg``.

    The filename is the current Unix time in milliseconds, which is
    (mostly) unique per download.  The Referer header is required by the
    site's hotlink protection.
    """
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240",
        'Connection': 'Keep-Alive',
        'Referer': "http://www.mzitu.com/99566"
    }
    # timeout so a stalled server cannot hang the download forever
    img = requests.get(url, headers=headers, timeout=10)
    # millisecond timestamp; the original routed this through a needless lambda
    filename = "{}.jpg".format(int(round(time.time() * 1000)))
    # 'wb' instead of the original 'ab': append mode would concatenate two
    # images into one corrupt file on a filename collision
    with open(filename, 'wb') as f:
        f.write(img.content)

# To list all gallery URLs from the index page instead, uncomment:
# html = downHtml(url)
# parseHtml(html)

# Download every image of the gallery at `pic`, one page per thread.
html = downHtml(pic)
num = get_pic_num(html)
workers = []
for i in range(num):
    # renamed from ``str`` to avoid shadowing the builtin
    page_url = "{}/{}".format(pic, i + 1)
    page_html = downHtml(page_url)
    worker = threading.Thread(target=get_pic, args=(page_html,))
    worker.start()
    workers.append(worker)
# Bug fix: the original joined each thread immediately after starting it,
# which serialized the downloads and defeated the threading entirely.
# Join only after all workers have been started.
for worker in workers:
    worker.join()