# Multi-threaded crawler that downloads images (one producer thread
# scrapes image URLs, a pool of worker threads downloads them).
import os
import queue
import time
import urllib
import urllib.request
from multiprocessing import Pool, Queue
from threading import Thread

import requests
from bs4 import BeautifulSoup


class Reptile:
    """Multi-threaded image scraper.

    One producer thread (`get_image`) collects image URLs from paginated
    search results and puts them on a queue; `thead_num` consumer threads
    (`SaveImage`) pull URLs off the queue and download them to disk.
    """

    HEADERS = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'}
    # Backward-compat alias for the original misspelled attribute name.
    HAEDDERS = HEADERS
    # Sentinel put on the queue (one per worker) to tell consumers to stop.
    _DONE = 'ok'

    def __init__(self, url, count_page, count=70, filepath='images'):
        """
        :param url: search endpoint to scrape (e.g. the Bing image async URL)
        :param count_page: number of result pages to fetch
        :param count: images requested per page
        :param filepath: directory the downloaded images are written to
        """
        self.url = url
        self.count_page = count_page
        self.count = count
        # queue.Queue is the right primitive for threads; the original used
        # multiprocessing.Queue, whose empty()/qsize() are unreliable.
        self.Qlist = queue.Queue()
        self.filepath = filepath
        self.thead_num = 10
        # Create the target directory up front so workers can save into it.
        os.makedirs(self.filepath, exist_ok=True)

    def main(self):
        """Start the producer and the worker threads and wait for them all."""
        producer = Thread(target=self.get_image)
        producer.start()
        workers = [Thread(target=self.SaveImage) for _ in range(self.thead_num)]
        for worker in workers:
            worker.start()
        # Join so main() returns only after every download has finished
        # (the original started the threads and never joined them).
        producer.join()
        for worker in workers:
            worker.join()

    def get_image(self):
        """Producer: scrape image URLs page by page and enqueue them."""
        for page in range(self.count_page):
            params = {'q': '图片', 'first': page * self.count + 1,
                      'count': self.count, 'lostate': 'r', 'mmasync': 1}
            resp = requests.get(url=self.url, params=params, headers=self.HEADERS)
            soup = BeautifulSoup(resp.text, features='html.parser')
            # Renamed from `list`, which shadowed the builtin.
            img_tags = soup.findAll('img')
            for tag in img_tags:
                # Drop the query string so only the bare image URL is queued.
                src = tag.attrs['src'].split('?')[0]
                self.Qlist.put(src)
        # One sentinel per worker so every consumer thread terminates.
        for _ in range(self.thead_num):
            self.Qlist.put(self._DONE)
        print('抓完了')

    def SaveImage(self):
        """Consumer: download queued URLs until the stop sentinel is seen.

        Queue.get() blocks until an item is available, so no busy-wait is
        needed (the original spun on an unreliable empty() check).
        """
        while True:
            path = self.Qlist.get()  # blocks until an item arrives
            if path == self._DONE:
                break
            try:
                time.sleep(1)  # polite throttle between downloads
                target = os.path.join(self.filepath,
                                      os.path.basename(path) + '.jpg')
                # Requires `import urllib.request`; plain `import urllib`
                # does not make the submodule available in Python 3.
                urllib.request.urlretrieve(path, target)
            except Exception as e:
                # Best-effort: one failed download must not kill the worker.
                print(e)
if __name__ == "__main__":
    url = 'http://cn.bing.com/images/async'
    count_page = 2
    # Bind the instance to a lowercase name; the original rebound the class
    # name `Reptile` to the instance, shadowing the class itself.
    reptile = Reptile(url, count_page)
    reptile.main()

