# -*- coding: UTF-8 -*-

import time
import socket
import urllib
import urllib2
import os
from bs4 import BeautifulSoup
import json

# Destination directory for downloaded images (created inside FindLink if missing).
path = "C:/tmp/images5"


def SaveImage(link, count):
    """Best-effort download of the image at *link* into ``path``.

    The file is saved as ``<count>.jpg``. Any failure is printed and
    swallowed so the surrounding crawl keeps going.

    :param link: direct image URL (the ``murl`` field from Bing).
    :param count: running image index used as the file name.
    """
    try:
        # Throttle a little so we don't hammer the image hosts.
        time.sleep(0.2)
        socket.setdefaulttimeout(60)
        urllib.urlretrieve(link, os.path.join(path, str(count) + '.jpg'))
    except Exception as e:
        # Back off briefly after a failure before the next attempt.
        time.sleep(1)
        # str(e) instead of e.message: .message is deprecated and missing on
        # most exception types, which itself raised inside this handler.
        print("产生未知错误，放弃保存", str(e))


def FindLink(PageNum, InputData, StartPage=3):
    """Crawl Bing image-search result pages and download every hit.

    Each result page carries 35 images; tiles matching the ``.iusc``
    selector hold a JSON ``m`` attribute whose ``murl`` field is the
    original image URL, which is handed to SaveImage.

    :param PageNum: one past the last page index to fetch.
    :param InputData: URL-encoded search keyword.
    :param StartPage: first page index to fetch (default 3, matching the
        original hard-coded behavior).
    """
    url = "https://cn.bing.com/images/search?q={0}&FORM=BESBTB&first={1}&cw=1042&ch=997&ensearch=1"
    # Browser-like User-Agent so Bing serves the normal result markup.
    agent = {
        'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.165063 Safari/537.36 AppEngine-Google."}
    # Create the output folder BEFORE counting its files: the original
    # called os.listdir(path) first and crashed when path did not exist.
    if not os.path.exists(path):
        os.mkdir(path)
    # Resume numbering after any images already on disk.
    count = len(os.listdir(path))
    for i in range(StartPage, PageNum):
        print("正在下载第%s页" % i)
        try:
            request = urllib2.Request(url.format(InputData, i * 35 + 1), headers=agent)
            page = urllib2.urlopen(request)
            # Parse the result page with BeautifulSoup.
            soup = BeautifulSoup(page.read(), 'html.parser')
            for tile in soup.select('.iusc'):
                attr = json.loads(tile.attrs['m'])
                print(attr)
                # `in` instead of dict.has_key(): has_key is Python-2-only.
                if "murl" in attr:
                    link = attr.get("murl")
                    print("下载图片：", link)
                    count += 1
                    SaveImage(link, count)
        except Exception as e:
            # Keep crawling the remaining pages on any per-page failure.
            print('URL OPENING ERROR !', str(e))

if __name__ == '__main__':
    # Number of result pages to walk; each page yields 35 images.
    total_pages = 1000
    # Keyword to search for.
    keyword = 'forklift'
    # Percent-encode the keyword for use in the query string.
    encoded = urllib.quote(keyword)
    print(encoded)
    FindLink(total_pages, encoded)
