from urllib.request import urlretrieve
from bs4 import BeautifulSoup
import requests
import webbrowser
import random
import os

def download(url, dst, way=2):
    """Download *url* to the local path *dst*.

    Parameters
    ----------
    url : str
        Remote file URL.
    dst : str
        Local destination path (opened in binary write mode).
    way : int, optional
        Download strategy (default 2):
        1 -- load the whole body into memory, then write it out;
        2 -- stream the response chunk-by-chunk to keep memory flat;
        3 -- delegate to urllib's ``urlretrieve``.

    Raises
    ------
    requests.HTTPError
        If the server answers with an error status (ways 1 and 2).
    ValueError
        If *way* is not 1, 2 or 3 (the original silently did nothing).
    """
    if way == 1:  # method 1: buffer the entire body in memory
        r = requests.get(url)
        r.raise_for_status()  # fail loudly instead of saving an error page
        with open(dst, 'wb') as f:
            f.write(r.content)
    elif way == 2:  # method 2: stream to disk instead of loading it all
        # `with` guarantees the connection is released even if writing fails
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(dst, 'wb') as f:
                # 32-byte chunks caused huge per-chunk overhead; 8 KiB is sane
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
    elif way == 3:  # method 3: stdlib convenience helper
        urlretrieve(url, dst)
    else:
        raise ValueError('way must be 1, 2 or 3, got {!r}'.format(way))

def SearchAndDownload(keyword, imagesize=None, aspect=None, n=1, folder='./folder/'):
    """Search Bing Images for *keyword* and download the thumbnails found.

    Opens each result page in the default web browser as a side effect,
    scrapes the thumbnail URLs, then downloads every unique image into
    *folder* as ``image1.png``, ``image2.png``, ...

    Parameters
    ----------
    keyword : str
        Search query.
    imagesize : str, optional
        One of 'wallpaper', 'large', 'medium', 'small'; chosen at random
        when None.
    aspect : str, optional
        One of 'square', 'wide', 'tall'; chosen at random when None.
    n : int, optional
        Number of result pages to fetch (default 1).
    folder : str, optional
        Destination directory, created if missing (default './folder/',
        matching the previous hard-coded value).

    Raises
    ------
    requests.HTTPError
        If a search page request fails.
    """
    url = 'https://cn.bing.com/images/search?s'
    img_src = set()  # set de-duplicates URLs that appear on several pages

    # Fill unspecified filters with a random choice.
    if imagesize is None:
        imagesize = random.choice(['wallpaper', 'large', 'medium', 'small'])
    if aspect is None:
        aspect = random.choice(['square', 'wide', 'tall'])

    for page in range(n):
        param = {
            'q': keyword,
            'qft': '+filterui:imagesize-{}+filterui:aspect-{}'.format(imagesize, aspect),
            'first': str(page),
        }
        response = requests.get(url, params=param)
        response.raise_for_status()  # don't scrape an error page
        webbrowser.open(response.url)
        soup = BeautifulSoup(response.text, 'lxml')
        # Thumbnails carry their URL in either 'src' or the lazy-loaded
        # 'data-src' attribute; take whichever is present.
        for tag in soup.find_all('img', {'class': 'mimg rms_img'}):
            src = tag.get('src') or tag.get('data-src')
            if src:
                img_src.add(src)

    # Create the destination first -- the original crashed when the
    # folder did not already exist.
    os.makedirs(folder, exist_ok=True)
    # Tip: os.path.split(img_url) would give (filepath, filename) if the
    # original remote name were ever wanted instead of imageN.png.
    for cnt, each in enumerate(img_src, 1):
        download(each, os.path.join(folder, 'image{}.png'.format(cnt)))


# Script entry point -- guarded so importing this module no longer
# triggers a network search and browser pop-ups as a side effect.
if __name__ == '__main__':
    keyword = 'apple'
    SearchAndDownload(keyword)