# 导入requests库
import requests
# 导入文件操作库
import os
from bs4 import BeautifulSoup
import random
import time

# Pool of User-Agent strings rotated between requests so the scraper looks
# less like a single automated client.
# NOTE: every entry must be a separate, comma-terminated list element — the
# original code was missing the commas, so Python's implicit string
# concatenation collapsed all 12 strings into one bogus User-Agent and
# random.choice() always returned that same mega-string.
meizi_headers = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
    'Opera/8.0 (Windows NT 5.1; U; en)',
    'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
    'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
]
# Default request headers simulating a real browser.
# (A module-level `global` statement is a no-op and has been removed.)
headers = {'User-Agent': random.choice(meizi_headers)}
# Index page of the site to scrape.
url = "https://www.sanqianjiali.net/"
# Local directory where downloads are stored.  Raw string: '\s' is an
# invalid escape sequence in a normal string literal.
save_path = r'D:\sanqianjiali'

# 创建文件夹
def createFile(file_path):
    """Ensure *file_path* exists, then make it the working directory.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    exists()-then-makedirs pair, which was both unidiomatic
    (``is False``) and race-prone if the directory appeared between the
    check and the create.

    NOTE: changing the process working directory is a global side effect;
    download() relies on it so the per-gallery folders it creates with
    relative names land under save_path.
    """
    os.makedirs(file_path, exist_ok=True)
    # Switch into the freshly ensured directory.
    os.chdir(file_path)

def download(url_sub):
    """Fetch the gallery page at *url_sub* and save all lazy-loaded images.

    Creates a directory named after the page's <title> (relative to the
    current working directory, which main() has set to save_path via
    createFile) and writes each image into it, sleeping 1-3 seconds
    between downloads to avoid hammering the server.

    Fixes vs. original: no longer rebinds the module-global ``headers``
    on every image (kept local instead); skips <img> tags whose
    ``data-src`` attribute is missing (requests.get(None) would crash);
    builds paths with os.path.join; uses exist_ok=True for the mkdir.
    """
    # Fresh random UA for the page request.
    page_headers = {'User-Agent': random.choice(meizi_headers)}
    res_sub = requests.get(url_sub, headers=page_headers)
    res_sub.encoding = 'utf-8'
    # Parse the sub-page to locate the image download URLs.
    child_page = BeautifulSoup(res_sub.text, "html.parser")
    mkname = child_page.title.string
    print(mkname)
    # Race-free replacement for exists() + makedirs().
    os.makedirs(mkname, exist_ok=True)

    for it in child_page.find_all("img", class_="swiper-lazy"):
        src = it.get("data-src")
        if not src:
            # Some <img> tags may lack data-src; nothing to download.
            continue
        # Many image hosts require a Referer header to serve the file.
        img_headers = {'User-Agent': random.choice(meizi_headers),
                       'Referer': url}
        img_resp = requests.get(src, headers=img_headers)
        img_name = src.split("/")[-1]  # last path component of the URL
        with open(os.path.join(mkname, img_name), mode="wb") as f:
            f.write(img_resp.content)  # write the image bytes to disk

        print("over!!!", img_name)
        # Random 1-3 s pause between image downloads (politeness/rate limit).
        time.sleep(random.randint(1, 3))

# 主方法
def main():
    """Entry point: scrape the index page and download every linked gallery."""
    response = requests.get(url, headers=headers)
    response.encoding = 'utf-8'
    # Parse the index page with the stdlib html.parser backend.
    index_page = BeautifulSoup(response.text, 'html.parser')
    # Ensure the save directory exists and becomes the working directory,
    # so download() creates its per-gallery folders under save_path.
    createFile(save_path)
    # Every <a> inside the "grid" container links to one gallery sub-page.
    anchors = index_page.find("div", class_="grid").find_all("a")
    for anchor in anchors:
        download(anchor.get('href'))  # .get() reads the href attribute value
    print("all over!!!")

if __name__ == '__main__':
    main()