import os
import urllib.parse
import urllib.request
from threading import Thread

import requests
from bs4 import BeautifulSoup



def get_pic_urls(url):
    """Fetch *url* and return a list of image URLs found on the page.

    Only <img> tags that carry a ``srcset`` attribute are considered
    (these are the real content images on the target site); the plain
    ``src`` value of each is collected.

    Raises requests.HTTPError on a non-2xx response.
    """
    req = requests.get(url)
    # Fail loudly on 4xx/5xx instead of parsing an error page.
    req.raise_for_status()
    soup = BeautifulSoup(req.text, 'lxml')
    results = soup.find_all('img', attrs={'srcset': True})
    # img.get() avoids a KeyError for a tag that has srcset but no src.
    return [img.get('src') for img in results if img.get('src')]
def download_pic(link):
    """Download a single image from *link* into the local ``pics/`` directory.

    The filename is taken from the last path segment of the URL with any
    query string stripped, so ``.../photo.jpg?w=640`` saves as ``photo.jpg``.
    """
    print('downloading...', link)
    # Original code crashed with FileNotFoundError when pics/ was absent.
    os.makedirs('pics', exist_ok=True)
    # Parse the URL so query strings / fragments don't end up in the name.
    filename = os.path.basename(urllib.parse.urlparse(link).path)
    if not filename:
        filename = 'unnamed'
    urllib.request.urlretrieve(link, os.path.join('pics', filename))

def main():
    """Scrape image URLs from the landing page and download them concurrently.

    One thread is started per image; all threads are joined so the
    function only returns once every download attempt has finished.
    """
    url = 'http://pixabay.com/'
    pic_urls = get_pic_urls(url)

    threads = []
    for pic_url in pic_urls:
        t = Thread(target=download_pic, args=(pic_url,))
        t.start()
        threads.append(t)

    # Original code never joined, so completion was not observable.
    for t in threads:
        t.join()



if __name__ == '__main__':
    main()






