import requests
import re
import os
import threading
import queue
def get_urls():
    """Fetch the netbian.com landing page and return all .jpg image URLs.

    Returns:
        list[str]: every URL in the page source matching ``http...jpg``.

    Raises:
        requests.RequestException: on network failure, timeout, or a
            non-2xx HTTP response.
    """
    url = 'http://www.netbian.com/'  # site landing page
    pattern = r"(http.*?jpg)"  # non-greedy: stop at the first 'jpg'
    header = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36'
    }
    # timeout keeps the script from hanging forever on a stalled connection
    r = requests.get(url, headers=header, timeout=10)
    r.raise_for_status()  # fail loudly instead of regex-scanning an error page
    r.encoding = r.apparent_encoding  # detect encoding so the page decodes correctly
    return re.findall(pattern, r.text)
def download(url_queue: queue.Queue):
    """Worker loop: pull image URLs off *url_queue* and save each to disk.

    Intended to run in several threads at once; exits when the queue is
    drained. Per-URL failures are logged and skipped, not raised.

    Args:
        url_queue: queue of direct .jpg URLs produced by ``get_urls``.
    """
    root_path = 'F:\\1\\'  # destination folder for images (loop-invariant, hoisted)
    while True:
        try:
            # Non-blocking get: a blocking get() followed by a qsize() check
            # deadlocks — a thread parked in get() after another worker takes
            # the last item would wait forever.
            url = url_queue.get_nowait()
        except queue.Empty:
            break
        file_path = root_path + url.split('/')[-1]  # keep the original filename
        try:
            if not os.path.exists(root_path):
                os.makedirs(root_path)
            if not os.path.exists(file_path):
                # timeout so one dead link cannot hang the worker thread
                r = requests.get(url, timeout=10)
                with open(file_path, 'wb') as f:
                    f.write(r.content)  # 'with' closes the file; no explicit close needed
                print('图片保存成功')
            else:
                print('图片已经存在')
        except Exception as e:
            # best-effort: report the error and continue with remaining URLs
            print(e)
        print('线程名:', threading.current_thread().name, "图片剩余：", url_queue.qsize())

if __name__ == "__main__":
	url_queue = queue.Queue()
	urls = tuple(get_urls())
	for i in urls:
            url_queue.put(i)
	t1 = threading.Thread(target=download,args=(url_queue,),name="craw{}".format('1'))
	t2 = threading.Thread(target=download,args=(url_queue,),name="craw{}".format('2'))
	
	t1.start()
	t2.start()
    
