# Complete script: download Bing wallpaper images from a mirror site
from bs4 import BeautifulSoup # 处理获取的网页信息
import requests     #下载图片需要
import urllib.request
import random
import os     #系统模块,路径的操作
import time    #时间模块


def main():
    """Entry point: crawl the wallpaper mirror's front page and download images."""
    getData("https://bing.iamdt.cn/")  # site front page

#------------------------------------------------------------------------------#
def askURL(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    On failure the HTTP status code and/or failure reason are printed and an
    empty string is returned instead of raising.
    """
    headers = {
        # Pretend to be a desktop browser so the server serves the normal page.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3868.400 QQBrowser/10.8.4394.400"
        }
    request = urllib.request.Request(url, headers=headers)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")  # decode the raw bytes
    except urllib.error.URLError as e:
        # Bug fix: the original wrote print(e,code) / print(e,reason), which
        # referenced the undefined names `code`/`reason` and raised NameError
        # on every failed request.  Use attribute access on the exception.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
#------------------------------------------------------------------------------#
def getData(baseurl):
    """Crawl the wallpaper site at *baseurl* and download every gallery image.

    Walks each gallery page linked from the front page, extracts the image
    URLs and saves them as PNG files in the local ``A`` directory.  Sleeps a
    random 3-8 seconds before each download to avoid hammering the server.
    """
    headers = {
        # Same browser disguise as askURL; requests needs its own copy.
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3868.400 QQBrowser/10.8.4394.400"
    }
    html = askURL(baseurl)  # front page, already decoded to str
    path_name = 'A'  # local directory that receives the downloads
    # exist_ok=True already tolerates an existing directory, so the original
    # os.path.exists pre-check was redundant; create it once, up front.
    os.makedirs(path_name, exist_ok=True)
    soup = BeautifulSoup(html, "html.parser")  # stdlib parser, no lxml needed
    for item in soup.find_all('a', attrs={"class": "w3-tag w3-button w3-hover-green w3-light-grey w3-margin-bottom"}):
        galleries_url = "https://bing.iamdt.cn/" + item['href']
        gallerySoup = BeautifulSoup(askURL(galleries_url), "html.parser")
        for gallery in gallerySoup.find_all('div', attrs={"class": "w3-third"}):
            # NOTE(review): assumes the first <p> in each tile wraps the image
            # link as its second child — verify against the live page markup.
            img_url = gallery.find_all('p')[0].contents[1]['href']
            print(img_url)
            # Bug fix: the original used int(time.time()) * 1000, which
            # truncates to whole seconds BEFORE multiplying — two images
            # fetched within the same second collided on the same name and
            # the second was silently skipped.  Multiply first for true
            # millisecond resolution.
            file_name = os.path.join(path_name, str(int(time.time() * 1000)) + '.png')
            if not os.path.exists(file_name):
                time.sleep(random.randint(3, 8))  # be polite to the server
                r = requests.get(img_url, headers=headers)  # download the image
                if r.status_code == 200:
                    with open(file_name, 'wb') as f:
                        f.write(r.content)


if __name__ =="__main__":
    main()
    print("任务结束啦！！！")