# Image scraper (2021/9/8)
# Purpose: crawl a video listing page, follow each thumbnail to its detail
# page, and download every carousel image into the local ./img directory.
import os
import time

import requests
from bs4 import BeautifulSoup
url = "https://www.byj1.me/videos/shuiguoav/2"

data = requests.get(url)
data.encoding='utf-8'

main_page = BeautifulSoup(data.text,"html.parser")
alist = main_page.find("div",class_="grid mx-auto videos").find_all("div",class_="video-thumb video-preview")

for it in alist:
    lst = it.find("a")
    href = 'https://www.byj1.me'+lst.get('href')
        # 来拿详情页面
    child = requests.get(href)
    child.encoding='utf-8'
    child_conont = child.text
    #从子页面详情看路径
    child_page = BeautifulSoup(child_conont,"html.parser")
    z_alist = child_page.find("ul",id="lightSlider").find_all("li")
    for itt in z_alist:
        lstt  = itt.find("a")
        img = lstt.get('href')
        # 保存图片
        img_resp = requests.get(img)
        # img_resp.content #写到字节中
        # 进行切割获取最后的名字
        img_name = img.split("/")[-1]
        name = str(int(time.time()))
        with open("img/"+name+".jpg",mode="wb") as f:
            # 图片内容写入文件
            f.write(img_resp.content)

        print("ok",img_name)
        time.sleep(1)


# print(alist)