import requests
from selenium import webdriver
from bs4 import BeautifulSoup
import time
import os

class ReptilePicture():
    """Scrape wallpaper images from Unsplash with Selenium and save them locally.

    Workflow: open the page in Chrome, scroll to trigger lazy loading,
    collect <img> src URLs from the rendered DOM, then download each one
    into ``self.local_path`` as ``<index>.jpg``.
    """

    def __init__(self):
        # Page to scrape and local directory that will hold the images.
        self.url = "https://unsplash.com/t/wallpapers"
        self.local_path = "D://picture"

    def get_pic(self):
        """Drive the browser, harvest image URLs, and hand them off for download."""
        print("开始启动自动化————")
        driver = webdriver.Chrome()
        try:
            driver.get(self.url)
            self.scroll_win(driver, 1)  # scroll once so lazy images load
            print("开始获取图片路径")
            soup = BeautifulSoup(driver.page_source, 'lxml')
            # Non-dict attrs are treated by BeautifulSoup as a class filter:
            # this matches <img> tags carrying the '_2zEKz' CSS class.
            imgs = soup.find_all('img', {'_2zEKz'})
            # Keep only tags that actually have a src attribute (was a bare
            # except around img['src'], which hid every other error too).
            urls = [img['src'] for img in imgs if img.has_attr('src')]
        finally:
            # Always release the browser, even if scraping raised —
            # the original leaked a Chrome process per run.
            driver.quit()
        print("开始保存图片++++++")
        self.create_dir(urls)

    def scroll_win(self, driver, times):
        """Scroll to the bottom of the page `times` times, pausing for content.

        :param driver: active Selenium WebDriver
        :param times: number of scroll-to-bottom iterations
        """
        for i in range(times):
            print("开始执行第", str(i + 1), "次下拉操作")
            # Scroll the window to the bottom so the site lazy-loads more images.
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            print("第", str(i + 1), "次下拉操作执行完毕")
            print("第", str(i + 1), "次等待网页加载......")
            time.sleep(40)  # wait for the newly loaded images to render

    def create_dir(self, urls):
        """Ensure the target directory exists, then download every URL.

        :param urls: list of image URLs to download
        """
        print(self.local_path)
        # BUG FIX: the original tested `not os.path.abspath(path)`, which is
        # always False (abspath returns a truthy string), so the directory was
        # never created. os.path.exists is the correct check; makedirs also
        # creates missing parents.
        if not os.path.exists(self.local_path):
            os.makedirs(self.local_path)
        self.downloadImg(urls)

    def downloadImg(self, urls):
        """Download each URL into local_path as <index>.jpg; skip failures.

        :param urls: list of image URLs to fetch over HTTP
        """
        for i, url in enumerate(urls):
            print(url)
            try:
                # timeout prevents one dead URL from hanging the whole run
                img = requests.get(url, timeout=30)
            except Exception as e:
                # best-effort: report and move on to the next image
                print("exception error:", e)
                continue
            filepath = self.local_path + '/' + str(i) + '.jpg'
            # 'wb' overwrites any stale file; the original 'ab' appended to
            # leftovers from earlier runs, corrupting the images. `with`
            # guarantees the handle is closed even if write() raises.
            with open(filepath, 'wb') as f:
                f.write(img.content)
            print(filepath, '图片保存成功！')
if __name__ == "__main__":
    # Run the scraper only when executed as a script, not on import.
    # Renamed from `re`, which shadowed the stdlib `re` module name.
    scraper = ReptilePicture()
    scraper.get_pic()
