# HTTP client for fetching pages
import requests
# File-system helpers (folders, working directory)
import os
import bs4
from bs4 import BeautifulSoup
import random
import time

# User-Agent pool — pick from many; very old UA strings tend to get rejected.
# NOTE: each entry must be followed by a comma; without them Python's implicit
# string concatenation merges the whole list into a single giant string, and
# random.choice() would always return that broken value.
meizi_headers = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
    'Opera/8.0 (Windows NT 5.1; U; en)',
    'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
    'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
]
# Default request headers: a random UA to mimic a real browser.
# (A `global` statement at module level is a no-op and has been removed.)
headers = {'User-Agent': random.choice(meizi_headers)}
# Base URL of the site being scraped.
mziTu = 'https://www.meitu131.net'
# Local directory where downloaded images are stored (raw string so the
# backslash is never misread as an escape sequence).
save_path = r'D:\BeautifulPictures'

# Create the destination folder and make it the working directory.
def createFile(file_path):
    """Ensure *file_path* exists (creating parents as needed) and chdir into it.

    The download loop writes images with bare file names, so the process
    must be inside the target folder before any image is saved.
    """
    # exist_ok replaces the racy exists()-then-makedirs pattern: another
    # process creating the directory between the two calls can no longer
    # crash us with FileExistsError.
    os.makedirs(file_path, exist_ok=True)
    os.chdir(file_path)

# Download one photo set.
def download(url_sub):
    """Download every image of the photo set rooted at *url_sub*.

    Creates a sub-folder (named after the page <title>) inside save_path,
    reads the total page count from the pager link, then fetches
    index.html, index_2.html, ... and saves the centered <img> of each page.
    Any failure aborts only this set: the exception is logged, not re-raised,
    so the crawl over the remaining sets continues.
    """
    try:
        # Use a local header dict instead of mutating the module-global
        # `headers` (the original `global headers` rebinding was a hidden
        # side effect on every other request in the program).
        ua = {'User-Agent': random.choice(meizi_headers)}
        # timeout so a stalled connection cannot hang the crawl forever
        res_sub = requests.get(url_sub, headers=ua, timeout=30)
        res_sub.encoding = 'utf-8'
        soup_sub = BeautifulSoup(res_sub.text, 'html.parser')
        # The page <title> doubles as the folder name for this set.
        mkname = soup_sub.title.string
        print(mkname)
        # os.path.join instead of hand-built '\\' concatenation.
        createFile(os.path.join(save_path, mkname))
        # Total page count is the denominator of the pager's "x/y" text.
        page_sum = soup_sub.find('a', title='Page').text.split('/')[1]
        print("套图数量：" + page_sum)
        for it in range(1, int(page_sum) + 1):
            # Random 1-3 s delay between pages to avoid hammering the server.
            time.sleep(random.randint(1, 3))
            ua = {'User-Agent': random.choice(meizi_headers)}
            # Page 1 is index.html; subsequent pages are index_<n>.html.
            if it == 1:
                page_url = url_sub + 'index.html'
            else:
                page_url = url_sub + 'index_' + str(it) + '.html'
            res_page = requests.get(page_url, headers=ua, timeout=30)
            soup_page = BeautifulSoup(res_page.text, "html.parser")
            img = soup_page.find('p', align='center').find('img')
            if isinstance(img, bs4.element.Tag):
                url = img.attrs['src']
                file_name = url.split('/')[-1]
                # Referer header defeats the site's hotlink protection.
                ua = {'User-Agent': random.choice(meizi_headers),
                      'Referer': page_url}
                img_res = requests.get(url, headers=ua, timeout=30)
                with open(file_name, mode="wb") as f:
                    f.write(img_res.content)  # write image bytes to disk
                print(file_name, '图片保存成功！')
    except Exception as e:
        # Best-effort: log which set failed instead of a bare print(e),
        # so a long crawl's output remains diagnosable.
        print('download failed:', url_sub, '-', e)

# Entry point: crawl the "top" listing page and download every linked set.
def main():
    """Fetch the listing page, then download each photo set it links to."""
    listing = requests.get(mziTu + '/top/', headers=headers)
    # Parse with the stdlib html.parser (no extra dependency needed).
    soup = BeautifulSoup(listing.text, 'html.parser')
    # Make sure the base save folder exists before any download starts.
    createFile(save_path)
    # Every set link sits in the 'details' div with an empty target attr.
    anchors = soup.find('div', class_='details').find_all('a', target="")
    for anchor in anchors:
        download(mziTu + anchor.get("href"))

if __name__ == '__main__':
    main()