import requests
from bs4 import BeautifulSoup
import os
# NOTE(review): this module-level `url` is never read — get_page_index/get_img_src
# build their own URLs and shadow the name. Candidate for removal.
url = 'http://pic.netbian.com/4kfengjing/'
# http://pic.netbian.com/4kfengjing/index_2.html
# Browser-like User-Agent sent with every request so the site serves normal pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'
}

# Parsing a page means locating the target elements and reading an attribute
# (src, href, class, name, title — via ['attr']) or the text content via .get_text().
def get_page_index(num_pages=2):
    """
    Build the list of gallery listing-page URLs to scrape.

    Page 1 is the bare category URL; pages 2..num_pages use the
    ``index_{i}.html`` suffix, matching the site's pagination scheme.

    :param num_pages: total number of listing pages to include
        (default 2, which reproduces the original hard-coded range).
    :return: list of listing-page URL strings, first page first.
    """
    page_url_list = ['http://pic.netbian.com/4kfengjing/']
    # Pages beyond the first carry an explicit index in the filename.
    for i in range(2, num_pages + 1):
        page_url_list.append(f'http://pic.netbian.com/4kfengjing/index_{i}.html')
    return page_url_list

def get_img_src(url_list):
    """
    Fetch each listing page and extract every thumbnail's URL and title.

    :param url_list: listing-page URLs (as produced by get_page_index).
    :return: list of dicts ``{'url': absolute image URL, 'title': alt text}``.
    """
    img_src_list = []
    for url in url_list:
        # timeout added: without it a stalled server hangs the scraper forever.
        res = requests.get(url, headers=headers, timeout=10)
        # The site serves GBK-encoded HTML, so decode explicitly instead of
        # trusting requests' charset guess.
        html = res.content.decode('gbk')
        # soup is the parsed document tree for this listing page.
        soup = BeautifulSoup(html, 'lxml')
        # Thumbnails live under <ul class="clearfix"><li><a><img>.
        for img in soup.select('.clearfix>li>a>img'):
            img_src_list.append({
                'url': 'http://pic.netbian.com' + img['src'],  # src is site-relative
                'title': img['alt'],
            })
    return img_src_list


def download_img(img_list):
    """
    Download each image and save it as ``images/<title>.jpg``.

    Bug fixed: the original wrote the file only in the ``else`` branch of the
    directory-existence check, so whenever ``images/`` had to be created the
    image that triggered the creation was fetched but never saved. The
    directory is now ensured once up front and every image is written.

    :param img_list: dicts with 'url' and 'title' keys (from get_img_src).
    """
    # Ensure the target directory exists exactly once, before the loop.
    os.makedirs('images', exist_ok=True)
    for item in img_list:
        # timeout added so one stalled download cannot hang the whole run.
        res = requests.get(item['url'], headers=headers, timeout=10).content
        file_name = item['title']
        with open(f'images/{file_name}.jpg', mode='wb') as f:
            f.write(res)
            print(f'{file_name}下载、保存成功！')

def main():
    """Entry point: collect listing pages, scrape image metadata, download all."""
    pages = get_page_index()
    images = get_img_src(pages)
    download_img(images)


if __name__ == '__main__':
    main()
