# -*- coding: utf-8 -*-
"""
爬取百度检索图片
"""
import sys

# Python 2 only: make utf-8 the process-wide default codec so the Chinese
# search keywords survive implicit str<->unicode conversions.
# ``reload`` and ``sys.setdefaultencoding`` do not exist on Python 3 (str is
# already unicode there), so guard the hack behind a version check -- without
# it the module cannot even be imported under Python 3.
if sys.version_info[0] == 2:
    reload(sys)  # noqa: F821 -- ``reload`` is a builtin on Python 2 only
    sys.setdefaultencoding('utf-8')

import re
import urllib
import requests
import itertools

# Tables for decoding Baidu's obfuscated "objURL" image links.

# Multi-character tokens that stand in for URL punctuation.
str_table = {
    '_z2C$q': ':',
    '_z&e3B': '.',
    'AzdH3F': '/'
}

# Single-character substitution cipher: each character of the obfuscated
# alphabet decodes to the character at the same position in the plain one.
_obfuscated = 'wkv1ju2it3hs4g5rq6fp7eo8dn9cm0bla'
_plain = 'abcdefghijklmnopqrstuvw1234567890'

# char_table maps code point -> code point, the form str.translate() expects.
char_table = {ord(src): ord(dst) for src, dst in zip(_obfuscated, _plain)}


class SpiderBaiduImg(object):
    """
    Crawl images from Baidu image search.

    Fetches Baidu's image-search JSON API page by page, decodes the
    obfuscated "objURL" links and saves up to ``img_num`` images as
    sequentially numbered .jpg files under ``save_image_path``.
    """
    def __init__(self, word, img_num, save_image_path):
        """
        :param str word: search keyword
        :param int img_num: maximum number of images to download
        :param str save_image_path: directory path (including trailing
            separator) the images are written to
        """
        self.word = word
        self.img_num = img_num
        self.save_image_path = save_image_path
        # Baidu image-search JSON API; {word} is the quoted keyword,
        # {pn} the result offset, rn=30 results are returned per request.
        self.url = "http://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={word}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&word={word}&s=&se=&tab=&width=&height=&face=&istype=&qc=&nc=1&fr=&expermode=&pn={pn}&rn=30&gsm=5a&1539964466247="

    def decode(self, url):
        """
        Decode an obfuscated Baidu "objURL" into the real image URL.

        :param str url: obfuscated url taken from the API response
        :return: decoded url
        """
        # First undo the multi-character tokens (':', '.', '/') ...
        for key, value in str_table.items():
            url = url.replace(key, value)
        # ... then the single-character substitution cipher.
        return url.translate(char_table)

    def build_urls(self, word):
        """
        Build an endless generator of API page urls for ``word``.

        :param str word: search keyword
        :return: generator yielding api urls with increasing offsets
        """
        # ``urllib.quote`` moved to ``urllib.parse.quote`` in Python 3;
        # import locally so the module works on both interpreters.
        try:
            from urllib.parse import quote
        except ImportError:  # Python 2
            from urllib import quote
        word = quote(word)
        # NOTE(review): rn=30 results per page but pn advances by 60, so
        # every other page is skipped -- kept as-is, confirm intent.
        return (self.url.format(word=word, pn=x)
                for x in itertools.count(start=0, step=60))

    def get_img_url(self, baidu_url):
        """
        Extract the (still obfuscated) image urls from one API page.

        :param str baidu_url: Baidu image api url
        :return: list of obfuscated image urls
        """
        html = requests.get(baidu_url, timeout=10).content.decode('utf-8')
        re_url = re.compile(r'"objURL":"(.*?)"')
        return re_url.findall(html)

    def resolve_img_url(self, img_list):
        """
        Decode a list of obfuscated image urls.

        :param list img_list: obfuscated image urls
        :return: list of decoded image urls
        """
        return [self.decode(x) for x in img_list]

    def download_baidu_img(self, img_url, name):
        """
        Download one image and save it as ``<name>.jpg``.

        :param str img_url: decoded image url
        :param name: file name stem (the running index)
        :return: bool -- True if the image was saved
        """
        try:
            # Timeout added so a single dead host cannot hang the crawl
            # (consistent with get_img_url).
            res = requests.get(img_url, timeout=10)
            if res.status_code != 200:
                print("the status_code is %s" % res.status_code)
                return False
        except Exception as e:
            print("抛出异常：", img_url)
            print("错误原因" + str(e))
            return False
        # Save the image bytes to disk.
        with open(self.save_image_path + str(name) + ".jpg", "wb") as f:
            f.write(res.content)
        return True

    @property
    def engine(self):
        """
        Main entry point: crawl result pages and download images until
        ``img_num`` downloads have been attempted or results run out.

        NOTE(review): a property with heavy side effects is unusual --
        kept as a property for backward compatibility with callers that
        access ``spider.engine`` without parentheses.
        """
        name_index = 1
        for baudi_url in self.build_urls(self.word):
            print("正在请求%s" % baudi_url)
            img_urls = self.resolve_img_url(self.get_img_url(baudi_url))
            # Bug fix: the original tested len() of the *url string*
            # (never empty); stop when a page yields no images instead.
            if not img_urls:
                return
            for img_url in img_urls:
                if self.download_baidu_img(img_url, name_index):
                    print("正在下载%s===============>>%s" % (name_index, img_url))
                name_index += 1
                # Bug fix: '== self.img_num' stopped one image early
                # (only img_num - 1 were attempted).
                if name_index > self.img_num:
                    print("---------------------下载完成----------------------")
                    return
        return
if __name__ == '__main__':
    # Search keyword to download images for (example: 苍老湿).
    word = "苍老湿"
    # How many images to fetch; keep this small to avoid an IP ban
    # (use proxies for large-scale crawling).
    img_num = 35
    # Directory the images are saved into, with trailing separator.
    save_image_path = "/Users/kunlun/Desktop/img/"
    # ``engine`` is a property, so merely accessing it runs the crawl.
    SpiderBaiduImg(word, img_num, save_image_path).engine
    print("--------------------- Download Success! ----------------------")
