import requests
from bs4 import BeautifulSoup
from time import sleep
import re

# Scrape the umeitu.com "weimeibizhi" wallpaper listing page, follow each
# thumbnail link to its detail page, and download the full-size image to
# D:\ui\reptile\img\. Throttles one request per second to be polite.
url = 'https://www.umeitu.com/bizhitupian/weimeibizhi/'
res = requests.get(url)
res.encoding = 'utf-8'  # site serves UTF-8; set explicitly to avoid mojibake

# Hand the page source to BeautifulSoup with an explicit HTML parser.
main_page = BeautifulSoup(res.text, 'html.parser')
pic_list = main_page.find("ul", attrs={"class": "pic-list after"})
if pic_list is None:
    # Fail loudly with context instead of an opaque AttributeError below.
    raise RuntimeError("listing <ul class='pic-list after'> not found; page layout may have changed")
alist = pic_list.find_all("a")

for a in alist:
    # a.get('href') reads the attribute value directly. lstrip('/') avoids a
    # double slash when the href is root-relative; it is a no-op otherwise.
    href = 'https://www.umeitu.com/' + a.get('href').lstrip('/')

    # Fetch the child (detail) page and parse out the image source URL.
    child_page_res = requests.get(href)
    child_page_res.encoding = 'utf-8'
    child_page = BeautifulSoup(child_page_res.text, 'html.parser')
    child_page_res.close()  # release the connection promptly (was leaked)

    div = child_page.find('div', class_="ImageBody")
    if div is None or div.find('img') is None:
        # Skip entries whose detail page doesn't match the expected layout.
        print("skip (no ImageBody/img):", href)
        continue
    src = div.find('img').get('src')

    # Download the image. .content is the raw bytes, suitable for 'wb' mode.
    img_res = requests.get(src)
    # Keep the original naming scheme: last 6 chars of the URL filename
    # (e.g. "ab.jpg"). NOTE(review): collisions are possible — confirm OK.
    img_name = src.split('/')[-1][-6:]
    with open(r'D:\ui\reptile\img\%s' % img_name, 'wb') as f:
        f.write(img_res.content)
    img_res.close()  # release the connection promptly (was leaked)

    print("over =>", img_name)
    sleep(1)  # throttle: one download per second

print('all_over!')

res.close()


