#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import sys
import json

# Python 2 hack: site.py deletes sys.setdefaultencoding at startup, so the
# module must be reload()ed to get it back before it can be called.
reload(sys)

# Force the process-wide default str<->unicode codec to UTF-8 so implicit
# coercions below (e.g. building URLs from the user's input) don't raise
# UnicodeDecodeError.  NOTE(review): changing the default encoding is a
# well-known anti-pattern that can mask real encoding bugs — kept as-is
# because the rest of the script relies on it.
sys.setdefaultencoding('utf8')
from bs4 import BeautifulSoup

urls = ["http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&pn=180&word="]
def crwn(s):
    for url in urls:
            url = url + s.encode('utf-8')
            request = urllib2.Request(url)
            request.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0')
            response = urllib2.urlopen(request)
            # html = open('download.html', 'w')
            # html.write(response.read())
            # html.close()
            # soup = BeautifulSoup(response.read(), 'html.parser', from_encoding='utf-8')
            # imgs = soup.find_all('img', class_='main_img img-hover')
            # for img in imgs:
            #     print img['data-imgurl']
            images = json.load(response)['imgs']
            count = 1
            for img in images:
                imgurl = img['thumbURL']
                title = img['fromPageTitle']
                type = img['type']
                print title,  type, imgurl
                request = urllib2.Request(imgurl)
                request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36')
                request.add_header('Host', 't11.baidu.com')
                file = urllib2.urlopen(request)
                with open('/Users/circle/Jo/workspace/python/SpiderSystem/download/' + str(count) + '.' + type, "wb") as code:
                    code.write(file.read())
                count = count + 1

crwn(raw_input("输入搜索内容："))
