from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
import urllib.request
import os

class webdownloadimg:
    """Scrape and download gallery images from a web page.

    The page is rendered with Selenium (Chrome); images are the
    ``img.photoImage`` tags inside ``.imgbox`` containers, addressed by
    their ``data-src`` attribute. Typical use: call ``web_img_save()``
    (it collects URLs lazily), then ``web_class()`` to close the browser.
    """

    def __init__(self, web_dz, savepath='./image/', summary=None):
        # web_dz:   URL of the page to scrape.
        # savepath: directory images are written to (created on demand).
        # summary:  optional starting value for the image counter.
        #           Fix: the original accepted this argument but always
        #           overwrote it with 0.
        self.web_dz = web_dz
        self.savepath = savepath
        self.summary = 0 if summary is None else summary
        self.options = webdriver.ChromeOptions()
        # Keep the browser window open after the script exits.
        self.options.add_experimental_option('detach', True)
        self.driver = webdriver.Chrome(options=self.options)
        self.title = None          # page <title>, filled in by web_cz()
        self.img_scr = []          # collected image URLs
        self.soup = None           # parsed page, filled in by web_cz()
        self.savefile_arr = []     # file names produced by web_img_save()
        self.diaoyonggc = 0        # 1 once web_img_dz() has run (guard flag)

    def web_cz(self):
        """Load the page in the browser and parse it.

        Stores the parsed document in ``self.soup`` and the page title
        in ``self.title``.
        """
        self.driver.get(self.web_dz)
        self.driver.implicitly_wait(5)  # wait up to 5 s for the page to load
        html = self.driver.page_source  # full rendered page source
        self.soup = BeautifulSoup(html, 'html.parser')  # parse the page
        self.title = self.soup.title.string

    def web_img_dz(self):
        """Collect image URLs from every ``.imgbox`` container.

        Appends each ``data-src`` URL to ``self.img_scr`` and counts it
        in ``self.summary``. Fix: tags without a ``data-src`` attribute
        are skipped — the original appended ``None`` and later crashed
        in ``web_img_save``.
        """
        self.diaoyonggc = 1
        self.web_cz()
        for box in self.soup.find_all(attrs={'class': 'imgbox'}):
            for img in box.find_all('img', attrs={'class': 'photoImage'}):
                src = img.get('data-src')
                if src is None:
                    continue  # no usable URL on this tag
                self.summary += 1
                self.img_scr.append(src)

    def web_img_save(self):
        """Download every collected image URL into ``self.savepath``.

        Collects URLs first (via ``web_img_dz``) if that has not been
        done yet. Failed downloads are reported and skipped.
        """
        if self.diaoyonggc != 1:  # lazily collect URLs on first use
            self.web_img_dz()
        # Create the target directory once, not on every loop iteration.
        if not os.path.exists(self.savepath):
            os.makedirs(self.savepath)
        counter = 0
        for src in self.img_scr:
            counter += 1
            # Strip any query string so the URL ends in a clean file name.
            clean_url = src.split('?', 1)[0]
            # Compose the file name: <page title>_<index>_<url basename>
            self.file_name = (str(self.title) + '_' +
                              str(counter) + '_' +
                              str(clean_url).split('/')[-1])
            self.savefile_arr.append(self.file_name)

            # Combine the save directory with the file name.
            save_path = str(self.savepath) + self.file_name
            # Absolute path, if needed: os.path.abspath(save_path)
            try:
                urllib.request.urlretrieve(clean_url, save_path)
                print(f"图片已保存到 {save_path},这是第{counter}/{self.summary}张图片")
            except Exception as e:
                # Best-effort: report the failure and continue downloading.
                print("下载图片失败:", str(e))
        print('本次共爬取图片数量' + str(self.summary) + '张')

    def web_class(self):
        """Close the Selenium browser session."""
        self.driver.quit()



# Entry point: scrape the given gallery page, download its images, then
# close the browser. Guarded with __main__ so importing this module does
# not launch a browser as a side effect; try/finally guarantees the
# Selenium driver is quit even if downloading raises.
if __name__ == '__main__':
    web_wz = 'https://www.zcool.com.cn/work/ZNjczODQ4MDQ=.html'
    webdown = webdownloadimg(web_wz)
    try:
        webdown.web_img_save()
    finally:
        webdown.web_class()

