# -*- coding: utf-8 -*-
"""
Created on Fri Jun  1 21:05:44 2018

@author: chenw
"""

import json
import os
import time

import numpy as np
import requests


class BaiDu_Image_Spider():
    """Baidu image-search crawler.

    Pages through Baidu's image-search JSON API for a given keyword and
    downloads square images that meet a minimum size, skipping copyrighted
    results and files already downloaded, until ``img_num_need`` images
    have been collected.
    """
    # Baidu obfuscates ``objURL``; these substrings decode to URL punctuation.
    str_dict = {
        '_z2C$q': ':',
        '_z&e3B': '.',
        'AzdH3F': '/'
    }
    # Per-character substitution cipher used by the same obfuscation scheme.
    intab = "wkv1ju2it3hs4g5rq6fp7eo8dn9cm0bla"
    outtab = "abcdefghijklmnopqrstuvw1234567890"
    trantab = str.maketrans(intab, outtab)
    url_format = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={word}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word={word}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn={pn}&rn={rn}'

    def __init__(self, search_word, img_path, img_num, img_num_need, img_file_names,
                 img_min_width=800, img_min_height=800, img_formats=('.png', '.jpg', '.jpeg'), pn_max=1000, timeout=3, time_sleep=3, url_connect_times=3):
        """Configure the crawler.

        Args:
            search_word: keyword to search for.
            img_path: directory where images are saved.
            img_num: number of images already on disk.
            img_num_need: total number of images wanted.
            img_file_names: set of file names already downloaded (deduplication).
            img_min_width: minimum accepted image width in pixels.
            img_min_height: minimum accepted image height in pixels.
            img_formats: accepted file extensions (lower-case, with dot).
            pn_max: highest result index to page through.
            timeout: per-request timeout in seconds.
            time_sleep: seconds to wait after a failed request.
            url_connect_times: retry attempts per URL.
        """
        self.search_word = search_word
        self.img_path = img_path
        self.img_num = img_num
        self.img_num_need = img_num_need
        self.img_file_names = img_file_names
        self.img_min_width = img_min_width
        self.img_min_height = img_min_height
        self.img_formats = img_formats
        self.pn_max = pn_max
        self.timeout = timeout
        self.time_sleep = time_sleep
        self.url_connect_times = url_connect_times

    def image_spider(self, rn=30):
        """Crawl result pages until enough images are downloaded.

        Args:
            rn: number of results per page.

        Returns:
            True if ``img_num_need`` images are available (already or after
            crawling), False if ``pn_max`` was exhausted first.
        """
        if not os.path.exists(self.img_path):
            os.makedirs(self.img_path)
        if self.img_num >= self.img_num_need:
            return True
        pn = 0
        while pn <= self.pn_max:
            search_url = self.get_search_url(pn, rn)
            url_data = self.get_url_data(search_url)
            # Skip pages that could not be fetched/decoded and move on.
            if url_data is not None and self.download_img(url_data, pn):
                return True
            pn += rn
        return False

    def get_search_url(self, pn, rn):
        """Build the search-API URL for one result page.

        Args:
            pn: index of the first result on the page.
            rn: number of results per page.

        Returns:
            The fully formatted request URL.
        """
        return self.url_format.format(word=self.search_word, pn=pn, rn=rn)

    def get_url_data(self, url):
        """Fetch a search-result page and decode it as JSON.

        Tries several text encodings (Baidu pages are not always UTF-8) and
        retries transient failures up to ``url_connect_times`` times.

        Args:
            url: search-result page URL.

        Returns:
            The decoded JSON payload, or None if every attempt failed.
        """
        url_data = None
        decode_list = ['utf-8', 'GB18030', 'GBK']
        decode_index = 0
        for _ in range(self.url_connect_times):
            try:
                content = requests.get(url, timeout=self.timeout).content.decode(decode_list[decode_index])
                url_data = json.loads(content)
            except UnicodeDecodeError as err:
                # Wrong guess at the encoding: advance to the next candidate.
                print(err)
                decode_index += 1
                if decode_index >= len(decode_list):
                    break
                time.sleep(self.time_sleep)
            except Exception as err:
                # Network/JSON error: back off and retry.
                print(err)
                time.sleep(self.time_sleep)
            else:
                break
        return url_data

    def download_img(self, data, pn):
        """Download the acceptable images listed in one result page.

        Args:
            data: decoded JSON payload of a search-result page.
            pn: index of the first result on this page (progress reporting).

        Returns:
            True once ``img_num`` reaches ``img_num_need``, else False.
        """
        # .get() guards against partial API payloads missing these keys.
        for image_data in data.get('data', []):
            if 'objURL' not in image_data:
                continue
            # partnerId > 0 marks copyrighted images -- skip them.
            if image_data.get('partnerId', 0) > 0:
                continue
            width = image_data.get('width', 0)
            height = image_data.get('height', 0)
            # Keep only square images of at least the configured size.
            if width != height or width < self.img_min_width or height < self.img_min_height:
                continue
            img_url = self.decode_img_url(image_data['objURL'])
            for _ in range(self.url_connect_times):
                try:
                    res = requests.get(img_url, timeout=self.timeout)
                    if 400 <= res.status_code < 500:
                        # Client errors are permanent; retrying the same
                        # URL cannot succeed, so give up on this image.
                        break
                except Exception as err:
                    print(err)
                    time.sleep(self.time_sleep)
                else:
                    file_name = os.path.split(res.url)[1]
                    if file_name in self.img_file_names:
                        break
                    if os.path.splitext(file_name)[1].lower() not in self.img_formats:
                        break
                    # BUG FIX: record the bare file name -- the value the
                    # duplicate check above compares against -- instead of
                    # the joined path, which never matched.
                    self.img_file_names.add(file_name)
                    with open(os.path.join(self.img_path, file_name), "wb") as f:
                        f.write(res.content)
                    self.img_num += 1
                    if self.img_num >= self.img_num_need:
                        return True
                    break
        return False

    def decode_img_url(self, obj_url):
        """De-obfuscate a Baidu ``objURL`` into a real image URL.

        Args:
            obj_url: obfuscated image URL from the API payload.

        Returns:
            The decoded, fetchable image URL.
        """
        for key, value in self.str_dict.items():
            obj_url = obj_url.replace(key, value)
        return obj_url.translate(self.trantab)
