import time
from pathlib import Path

import requests
from bs4 import BeautifulSoup

BASE_URL = "https://www.umei.cc"
LIST_URL = BASE_URL + "/bizhitupian/meinvbizhi/"
SAVE_DIR = Path("爬到的数据/美女图片")  # output directory for downloaded images
TIMEOUT = 10  # seconds per HTTP request


def _fetch_html(session, url):
    """GET *url* and return the response body decoded as UTF-8.

    The site declares no reliable charset, so we force UTF-8 to avoid
    mojibake (the original code did the same via ``resp.encoding``).
    Raises ``requests.HTTPError`` on a non-2xx status.
    """
    resp = session.get(url, timeout=TIMEOUT)
    resp.raise_for_status()
    resp.encoding = "utf-8"
    return resp.text


def main():
    """Crawl the wallpaper listing page and download each detail page's image.

    For every link inside the ``#infinite_scroll`` container, fetch the
    child page, locate the ``div.big-pic img`` element, and save the image
    under ``SAVE_DIR`` using the last URL path component as the filename.
    """
    # Original crashed with FileNotFoundError when the directory was missing.
    SAVE_DIR.mkdir(parents=True, exist_ok=True)

    # A single Session reuses the TCP connection across the many requests.
    with requests.Session() as session:
        main_page = BeautifulSoup(_fetch_html(session, LIST_URL), "html.parser")
        scroll = main_page.find("div", id="infinite_scroll")
        if scroll is None:
            # Layout changed or the page failed to render the listing.
            return

        for a in scroll.find_all("a"):
            href = BASE_URL + a.get("href")  # .get() returns the attribute value

            # Fetch the child page and find the full-size image URL.
            child_page = BeautifulSoup(_fetch_html(session, href), "html.parser")
            div = child_page.find("div", class_="big-pic")
            img = div.find("img") if div is not None else None
            src = img.get("src") if img is not None else None
            if not src:
                continue  # skip pages without the expected image markup

            # Download the image bytes.
            img_resp = session.get(src, timeout=TIMEOUT)
            img_resp.raise_for_status()
            img_name = src.split("/")[-1]  # filename = last path component

            # "wb", not the original "ab": append mode made re-runs
            # concatenate bytes onto existing files, corrupting the images.
            with open(SAVE_DIR / img_name, mode="wb") as f:
                f.write(img_resp.content)

            print("over!!!", img_name)
            time.sleep(0.1)  # be polite: pause ~100 ms between downloads


if __name__ == "__main__":
    main()

    print("all over!")