import requests
from bs4 import BeautifulSoup #
from selenium import webdriver #浏览器
import os #文件操作
import time #时间操作

# Scrape all album cover images of Jay Chou (周杰伦) from NetEase Cloud Music
class Reptile:
    """Scrape every album cover image of Jay Chou from NetEase Cloud Music.

    Drives a Chrome browser (Selenium) to render the JavaScript-built album
    listing page, parses the rendered HTML with BeautifulSoup, and downloads
    each cover image into a local directory.
    """

    def __init__(self):
        # Album listing page for artist id 6452 (Jay Chou), 36 albums per page.
        self.url = "https://music.163.com/#/artist/album?id=6452&limit=36&offset=0"
        # Destination directory for the downloaded cover images.
        self.localDirPath = "D://picture"

    def start(self):
        """Open the album page in Chrome, parse it, and save every cover image."""
        print("——开始自动化爬取———")
        driver = webdriver.Chrome()
        try:
            driver.get(self.url)
            # self.scroll_win(driver, 1)  # scroll once to trigger lazy loading, if needed
            # Explicit check instead of `assert`: asserts are stripped under `python -O`.
            if "网易云音乐" not in driver.title:
                raise RuntimeError("unexpected page title: " + driver.title)
            # The album list is rendered inside the `g_iframe` frame.
            # BUG FIX: `switch_to_frame` was deprecated and removed in Selenium 4;
            # the supported API is `switch_to.frame`.
            driver.switch_to.frame("g_iframe")
            albums = (BeautifulSoup(driver.page_source, "lxml")
                      .find('ul', class_='m-cvrlst-alb4')
                      .find_all('li'))
            self.savePhoto(albums)
        finally:
            # Always release the browser, even when navigation or parsing fails.
            driver.quit()

    def scroll_win(self, driver, times):
        """Scroll to the bottom of the page `times` times to trigger lazy loading."""
        print("——开始下拉刷新——")
        for i in range(times):
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            print("第", str(i + 1), "次下拉操作执行完毕")
            print("第", str(i + 1), "次等待网页加载......")
            time.sleep(30)  # wait 30s for the page to load before the next scroll

    def savePhoto(self, list):
        """Download the cover image of each album `<li>` element in `list`.

        NOTE(review): the parameter name shadows the builtin `list`; it is kept
        unchanged for backward compatibility with existing callers.
        """
        self.createdir()
        for item in list:
            # Album title.
            name = item.find('a', class_='tit').text
            # Album release date. (Renamed from `time`, which shadowed the
            # `time` module imported at file level.)
            release_date = item.find('span', class_='s-fc3').text
            # Cover image URL.
            url = item.img['src']
            self.createFile(name, release_date, url)

    def createdir(self):
        """Create the download directory if it does not exist yet."""
        print("——创建文件夹——")
        # BUG FIX: the original tested `os.path.abspath(...)`, which always
        # returns a non-empty (truthy) string, so the directory was NEVER
        # created. `os.path.exists` is the intended check.
        if not os.path.exists(self.localDirPath):
            os.makedirs(self.localDirPath)
        else:
            print("——文件夹已经存在，不需要创建——")

    def createFile(self, name, time, url):
        """Download `url` and save it as `<time>-<name>.jpg` in the target dir.

        `time` here is the album release-date string (the parameter name
        shadows the `time` module; kept for backward compatibility).
        """
        # Strip characters that are illegal in Windows file names so an album
        # title containing e.g. '/' or '?' cannot break the path.
        safe = "".join(c if c not in '\\/:*?"<>|' else '_'
                       for c in time + "-" + name)
        filepath = self.localDirPath + "/" + safe + ".jpg"
        # Timeout so a stalled download cannot hang the whole crawl;
        # raise_for_status so an HTTP error page is not saved as a "jpg".
        rq = requests.get(url, timeout=30)
        rq.raise_for_status()
        # BUG FIX: mode 'wb' instead of the original 'ab' — re-running the
        # crawler must overwrite an existing image, not append to it
        # (appending produces a corrupt JPEG). `with` guarantees the handle
        # is closed even if the write fails.
        with open(filepath, 'wb') as f:
            f.write(rq.content)
        print(filepath, '图片保存成功！')
# Guard the entry point so importing this module does not launch a browser;
# the scraper runs only when the file is executed as a script.
if __name__ == "__main__":
    Reptile().start()
