#!/usr/bin/env python
# -*- coding: utf-8 -*-

import json
import os
import re
import string
import time
import urllib.error
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup

save_dir = 'c:/bing2/'

def SaveImage(link, word):
    count = len(os.listdir(save_dir + word)) + 1
    try:
        # url解码
        name = link.split('/')[-1]
        name = urllib.parse.unquote(name)
        name = urllib.parse.unquote(name)
        name = re.sub(r'[|\\/><:?"*]','',name)
        name = save_dir+word+'/'+name
        
        if not name.endswith(('.jpg','.png','.gif','.jpeg')):
            name = name + '.jpg'
        if os.path.exists(name):
            return
        urllib.request.urlretrieve(link, name)
    except urllib.error.HTTPError as urllib_err:
        print(urllib_err)
    except Exception as err:
        print(err)
        print("产生未知错误，放弃保存:%s" % link)
    else:
        print("已下载"+word +': '+ str(count) + "张图")
    time.sleep(2)

def FindLink(InputData,word):
    for suffix in (['']+list(string.ascii_lowercase) + list(range(0,500))):
        for i in range(1000):
            print('page number: %d' % (i))
            try:
                url = 'https://cn.bing.com/images/async?q={0}&first={1}&count=35&relp=35&lostate=r&mmasync=1&dgState=x*0_y*0_h*0_c*6_i*36_r*6&IG=CB8CCCC396E14F4DBB50EC204CCBEB5E&SFX=2&iid=images.5724'
                agent = {'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.165063 Safari/537.36 AppEngine-Google."}
                agent = {'User-Agent': "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36"}

                page1 = urllib.request.Request(url.format(InputData+str(suffix), i*35+1), headers=agent)
                page = urllib.request.urlopen(page1)
                soup = BeautifulSoup(page.read(), 'html.parser')
                with open('bing_img.html', 'wb') as f:
                    f.write(soup.prettify().encode(errors='ignore'))

                if not os.path.exists(save_dir + word):
                    os.mkdir(save_dir + word)

                for elem in soup.select('.iusc'):
                    # print(elem)
                    link=eval(elem.attrs['m'])['murl']
                    # print(link)
                    SaveImage(link,word)
                else:
                    print("未找到数据")
                    break
            except:
                print('URL OPENING ERROR !')
if __name__=='__main__':
    # '明星','人物','动物','卡通','桌子','树木','杂志',
    words=['设备']
    for word in words:
        InputData=urllib.parse.quote(word)
        FindLink(InputData,word)
