#!/usr/bin/env python
# -*- coding:utf-8 -*-

import pymysql
import os
import sys
import mimetypes
import re
import urllib
import string
import socket
import urllib.request
import urllib.parse
import urllib.error
# Network timeout configuration (applies process-wide, see below)
import time

# Global socket timeout in seconds; makes every blocking network call in
# this module (e.g. urlretrieve in Crawler.save_image) abort instead of
# hanging forever on a stalled connection.
timeout = 120
socket.setdefaulttimeout(timeout)


class Crawler:
    """Download product images from scraped result rows.

    Each row in ``results`` is expected to be a dict with keys ``imgurl``,
    ``title`` and ``price`` (assumed from the lookups below — confirm
    against the caller that builds the rows).  Images are written into
    ``./<word>/`` with a sequential, filename-sanitized name.
    """

    # Class-level defaults; __init__ stores the effective sleep interval on
    # the instance as ``time_sleep``.
    __time_sleep = 0.1
    __amount = 0
    __start_amount = 0
    __counter = 0
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

    # MIME type -> file extension, built once instead of on every loop pass.
    # BUGFIX: 'image/png' was missing, so every PNG raised KeyError, fell
    # into the generic handler and slept 10 seconds before being skipped.
    _MIME_SUFFIX = {
        'image/bmp': 'bmp', 'image/cis-cod': 'cod', 'image/gif': 'gif',
        'image/ief': 'ief', 'image/jpeg': 'jpg', 'image/pipeg': 'jfif',
        'image/png': 'png', 'image/svg+xml': 'svg', 'image/tiff': 'tif',
        'image/x-cmu-raster': 'ras', 'image/x-cmx': 'cmx',
        'image/x-icon': 'ico', 'image/x-portable-anymap': 'pnm',
        'image/x-portable-bitmap': 'pbm', 'image/x-portable-graymap': 'pgm',
        'image/x-portable-pixmap': 'ppm', 'image/x-rgb': 'rgb',
        'image/x-xbitmap': 'xbm', 'image/x-xpixmap': 'xpm',
        'image/x-xwindowdump': 'xwd',
    }

    # Characters unsafe in a filename: whitespace, '/', '\' and '"'.
    # BUGFIX: the original patterns ('[\s*|/|\\\|\\\\|"]') were malformed
    # character classes — '|' and '*' are literals inside [...] — written as
    # non-raw strings with invalid escapes.  This is the intended set.
    _UNSAFE_CHARS = re.compile(r'[\s/\\"]')

    def __init__(self, t=0.1):
        """
        :param t: delay in seconds between two image downloads.
        """
        # Kept as a public (non-mangled) attribute because save_image reads
        # ``self.time_sleep``; the class-level ``__time_sleep`` is legacy.
        self.time_sleep = t

    def get_referrer(self, url):
        """Return ``scheme://netloc`` of *url*, used as the Referer header.

        For a URL with no scheme this returns ``netloc`` as parsed (usually
        an empty string), matching the original behavior.
        """
        par = urllib.parse.urlparse(url)
        if par.scheme:
            return par.scheme + '://' + par.netloc
        return par.netloc

    @staticmethod
    def _safe_name(text):
        """Sanitize *text* for use in a filename; falsy values become '无'."""
        return Crawler._UNSAFE_CHARS.sub('_', text if text else '无')

    def save_image(self, rsp_data, word):
        """Download every image in *rsp_data* into the ``./<word>/`` folder.

        :param rsp_data: iterable of dicts with 'imgurl', 'title', 'price'.
        :param word: keyword; also the destination directory name.
        """
        # makedirs(exist_ok=True) avoids the exists()/mkdir() race of the
        # original code.
        os.makedirs('./' + word, exist_ok=True)
        # Continue numbering after any files already present in the folder.
        self.__counter = len(os.listdir('./' + word)) + 1
        total = len(rsp_data)
        done = 1
        failed = 0
        for image_info in rsp_data:
            # Skip untitled rows up front — before the throttle sleep.
            if image_info['title'] == '':
                continue
            try:
                time.sleep(self.time_sleep)  # throttle between downloads
                mime = mimetypes.guess_type(image_info['imgurl'])[0]
                # Unknown/missing MIME types fall back to '.jpg' instead of
                # aborting the item (original raised KeyError here).
                suffix = '.' + self._MIME_SUFFIX.get(mime, 'jpg')
                # Send a browser UA and a matching Referer to reduce 403s.
                refer = self.get_referrer(image_info['imgurl'])
                opener = urllib.request.build_opener()
                opener.addheaders = [
                    ('User-agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0'),
                    ('Referer', refer)
                ]
                urllib.request.install_opener(opener)
                filename = '%d_%s_%s%s' % (
                    self.__counter,
                    self._safe_name(image_info['title']),
                    self._safe_name(image_info['price']),
                    suffix,
                )
                urllib.request.urlretrieve(image_info['imgurl'],
                                           './' + word + '/' + filename)
            except urllib.error.HTTPError as urllib_err:
                print(urllib_err)
                failed += 1
            except Exception as err:
                # Back off before continuing — unexpected errors here are
                # often transient network problems.
                time.sleep(10)
                print(err)
                print("产生未知错误，放弃保存")
                failed += 1
            else:
                sys.stdout.write("已下载：%s/%d  %s文件夹下已有：%d张图  下载失败数量：%d" % (done, total, word, self.__counter, failed) + '\r')
                sys.stdout.flush()
                done += 1
                self.__counter += 1
        return

    def get_images(self, word='no', results=None):
        """Save all *results* under ``./<word>/`` and report completion."""
        self.save_image(results, word)
        print("下载任务结束")
        return

    def start(self, word, results, spider_page_num=1, start_page=1):
        """
        Crawler entry point.
        :param word: keyword being scraped
        :param results: parsed rows (excel content) to download images for
        :param spider_page_num: pages to fetch; total images = pages x 60
        :param start_page: first page number
        :return: None
        """
        # NOTE(review): these offsets are computed but never read in the
        # visible code — paging may be handled by the caller; confirm.
        self.__start_amount = (start_page - 1) * 60
        self.__amount = spider_page_num * 60 + self.__start_amount
        self.get_images(word, results)



