#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'wshu'
"""
    ***********************************
    *  @filename : utils.py
    *  @Author : wshu
    *  @CodeDate : 2019/12/14 22:05
    *  @Software : PyCharm
    ***********************************
    后续优化提高下载效率: asyncio | aiohttp| aiofiles| aiohttp_requests
"""
import os
import time
import logging
# import shutil
import platform
import urllib.request
from random import choice
import requests
# import asyncio
# import aiohttp
# import aiofiles
# from aiohttp_requests import requests
from utils.UserAgentList import UA

from urllib.parse import urlparse


# 初始化图片本地存储位置
def init_media_store():
    """
    Return the local image-storage directory (<fs-root>/root/images/),
    creating it (and its parent) if needed.

    :return: absolute path of the image directory, with trailing separator
    """
    root = os.path.join(os.path.abspath(os.sep), 'root')
    media = os.path.join(root, 'images/')
    # BUGFIX: os.mkdir raised FileNotFoundError when 'root' did not exist
    # and FileExistsError on a create/create race; makedirs with
    # exist_ok=True handles both.
    os.makedirs(media, exist_ok=True)
    return media

# 初始化html本地存储位置
def init_HTML_store():
    """
    Return the local HTML-storage directory (<fs-root>/root/html/),
    creating it (and its parent) if needed.

    :return: absolute path of the HTML directory, with trailing separator
    """
    # (docstring fixed: the original copy-pasted the "images" description)
    root = os.path.join(os.path.abspath(os.sep), 'root')
    media = os.path.join(root, 'html/')
    # BUGFIX: os.mkdir raised FileNotFoundError when 'root' did not exist
    # and FileExistsError on a create/create race; makedirs with
    # exist_ok=True handles both.
    os.makedirs(media, exist_ok=True)
    return media

# -------------
# Image storage root. Hard-coded Windows dev path; switch to
# init_media_store() when deploying on the Linux server.
# img_store_path = init_media_store()
img_store_path = os.path.abspath('E:\\media')
# HTML storage root. NOTE(review): currently the same directory as the
# image root — presumably intentional for dev; use init_HTML_store() in prod.
html_store_path = os.path.abspath('E:\\media')
# html_store_path = init_HTML_store()
# -------------

# 下载html到本地
def download_html(url, html_text, fpath=html_store_path):
    """
    Save *html_text* to <fpath>/<domain>/<uuid><ext> and return the
    relative path used for database storage.

    :param url: page URL; its domain names the per-site folder and its
        path supplies the file extension (e.g. ``.html``)
    :param html_text: HTML document text to write (UTF-8)
    :param fpath: base download directory (defaults to html_store_path)
    :return: relative path ``<domain>/<filename>`` for the database
    """
    uuid_ = unique_id()
    # Per-site folder name: netloc minus any ":port" suffix.
    netloc = urlparse(url).netloc
    netloc = netloc.split(':')[0] if ":" in netloc else netloc
    # BUGFIX: the original compared platform.system() with ``is`` (string
    # identity — effectively always False), and built the save path from the
    # module global html_store_path while creating the directory under
    # ``fpath``, so a non-default fpath wrote into a missing directory.
    # os.path.join picks the right separator on every OS and honours fpath.
    file_extens = os.path.splitext(url)[1]  # e.g. '.html'
    file_subffix = uuid_ + file_extens      # '<uuid>.html'
    mkdirname = os.path.join(fpath, netloc)
    memory_path = os.path.join(netloc, file_subffix)  # relative DB path
    path_str = os.path.join(mkdirname, file_subffix)
    if not os.path.exists(mkdirname):
        os.makedirs(mkdirname)
    try:
        with open(path_str, 'w', encoding='utf-8') as fp:
            fp.write(html_text)
    except Exception as err:
        print('download html file failed: {}'.format(err.__class__.__name__))
    return memory_path  # relative path stored in the database

# Pick one random User-Agent at import time; all requests below reuse it.
user_agent = choice(UA)
headers = {'User-Agent': user_agent}
# -------------------------------------

# console log
def log_console():
    """
    Return the DEBUG-level console logger named "w_console".

    BUGFIX: the original attached a new StreamHandler on every call, so
    repeated calls duplicated each log line; the handler is now attached
    only once.

    :return: the configured logging.Logger
    """
    logger = logging.getLogger("w_console")
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:  # attach the console handler only once
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        # ANSI escape renders the timestamp in bright magenta on black.
        formatter = logging.Formatter(
            '\033[1;35;40m%(asctime)s\033[0m:%(message)s',
            datefmt="%Y-%m-%d %H:%M:%S")
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    return logger

# file log
def log_file(*args):
    """
    Return the DEBUG-level file logger writing to ScanLog.log (append mode).

    BUGFIX: the original attached a new FileHandler on every call, so
    repeated calls wrote each record multiple times; the handler is now
    attached only once.

    :param args: unused, kept for backward compatibility with callers
    :return: the configured logging.Logger
    """
    logger = logging.getLogger("w_file.log")
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:  # attach the file handler only once
        fh = logging.FileHandler(filename="ScanLog.log", mode='a',
                                 encoding=None, delay=False)
        fh.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s-%(message)s',
                                      datefmt="%Y-%m-%d %H:%M:%S")
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    return logger

# 当前时间
def getTime():
    """Return the current local time formatted as ``YYYY-MM-DD-HH_MM_SS``."""
    # time.localtime() with no argument defaults to the current time.
    return time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime())

# scrapy-> allowed_domains 过滤器
def filter_domain(url):
    """
    Derive the scrapy ``allowed_domains`` entry for *url* by stripping a
    leading ``www.`` label from its network location.

    BUGFIX: the original tested ``startswith('www')`` but sliced off four
    characters, mangling hosts such as ``www2.example.com`` (-> ``.example.com``)
    or ``wwwshop.com`` (-> ``hop.com``); only an exact ``www.`` prefix is
    removed now.

    :param url: full URL
    :return: domain suitable for allowed_domains
    """
    allowed_domains = urlparse(url).netloc
    if allowed_domains.startswith('www.'):
        allowed_domains = allowed_domains[4:]
    return allowed_domains

# scrapy Rule -> LinkExtractor 过滤器
def allow_d(url):
    """Return the network location of *url*, used as a LinkExtractor allow filter."""
    return urlparse(url).netloc

# img name > uuid.jpg
def unique_id():
    """
    Generate a unique file stem: 6 random alphanumeric characters followed
    by a dash-less uuid1 string (38 chars total).  Used to name downloaded
    files locally, e.g. original http://szlkst.cn/app/img/j002.gif becomes
    szlkst/HxLVdJ71c6f9ee382e11eabcc270c94ed24b5a.gif in the database.
    """
    import uuid
    import string
    from random import sample
    alphabet = string.ascii_letters + string.digits
    prefix = ''.join(sample(alphabet, 6))        # 6 distinct random chars
    # uuid1 is time-based, so successive ids never collide.
    stamp = str(uuid.uuid1()).replace('-', '')   # 32 hex chars, dashes removed
    return prefix + stamp

# 下载图片
def download_picture_to_local(picture_url, fpath=img_store_path):
    """
    Download the picture at *picture_url* into <fpath>/<domain>/<uuid><ext>
    through the HTTP proxy, and return the relative path for the database.

    :param picture_url: direct URL of the image
    :param fpath: base download directory (defaults to img_store_path)
    :return: relative path ``<domain>/<filename>`` for the database
    """
    uuid_ = unique_id()
    # NOTE(review): hard-coded internal HTTP proxy — confirm still required.
    proxies = {"http": "http://10.101.1.190:3129"}
    # Per-site folder name: netloc minus any ":port" suffix.
    netloc = urlparse(picture_url).netloc
    netloc = netloc.split(':')[0] if ":" in netloc else netloc
    # BUGFIX: the original compared platform.system() with ``is`` (string
    # identity — effectively always False, so the Windows branch was dead)
    # and ignored ``fpath`` when building the save path while creating the
    # directory under fpath.  os.path.join fixes both.
    file_extens = os.path.splitext(picture_url)[1]  # image extension, e.g. '.gif'
    file_subffix = uuid_ + file_extens              # '<uuid><ext>'
    mkdirname = os.path.join(fpath, netloc)
    memory_path = os.path.join(netloc, file_subffix)  # relative DB path
    path_str = os.path.join(mkdirname, file_subffix)
    if not os.path.exists(mkdirname):
        os.makedirs(mkdirname)
    try:
        with open(path_str, 'wb') as jpg:
            jpg.write(requests.get(picture_url, proxies=proxies, headers=headers).content)
    except IOError as err:
        print("下载图片出错: {}".format(err.__class__.__name__))
    return memory_path

# 获取Content-Length
def get_Content_length(url):
    """
    Fetch the Content-Length header of *url* through the HTTP proxy.

    :param url: resource URL (typically an image)
    :return: the first Content-Length value as a string, or None on any
        failure (bad URL, timeout, missing header, ...)
    """
    try:
        proxies = dict(http='10.101.1.190:3129')
        proxy_handler = urllib.request.ProxyHandler(proxies=proxies)
        opener = urllib.request.build_opener(proxy_handler)
        opener.addheaders = [('User-agent', user_agent)]
        # NOTE(review): install_opener changes the process-wide default
        # opener even though opener.open() is called directly below —
        # confirm whether any other code relies on it before removing.
        urllib.request.install_opener(opener)
        rqs = opener.open(url, timeout=3)
        meta = rqs.info()
        file_size = meta.get_all('Content-Length')
        if file_size is not None:
            file_size = file_size[0]
        print("Content-Length: ", file_size)
        return file_size
    except Exception as err:
        # BUGFIX: the original passed the template and the exception name as
        # two separate print() arguments, printing the '{}' literally.
        print('[-] Get "Content-Length" failed: {}'.format(err.__class__.__name__))
        return None

# 获取失败后，尝试多次获取
def tryget_content_length(imgurl):
    """
    Get the Content-Length of *imgurl*, retrying up to two more times after
    an initial failure.

    BUGFIX: the original had a bare ``return`` (None) mis-indented inside
    the retry loop, so only one retry ever ran and the fallback of 0
    promised by its docstring was never returned.

    :param imgurl: image URL
    :return: Content-Length string, or 0 when all attempts fail (the crawler
        compares this against the previously stored size)
    """
    content_length = get_Content_length(imgurl)
    if content_length is not None:
        return content_length
    for _ in range(2):  # two retries, as documented
        content_length = get_Content_length(imgurl)
        if content_length is not None:
            return content_length
    return 0
# 新图和旧图比较
def is_picture_same(before, after):
    """
    Compare two picture sizes.

    :param before: size recorded on the previous crawl
    :param after: size measured on the current crawl
    :return: ``'same'`` when equal, otherwise ``'diff'``
    """
    if before == after:
        return 'same'
    return 'diff'

# 获取文件图片的物理大小/ KB
def physize(url):
    """
    Return the physical size of the image at *url* in kilobytes (rounded),
    downloading it through the HTTP proxy; None on any failure.

    :param url: image URL
    :return: rounded size in KB, or None if the download fails
    """
    try:
        proxies = {"http": "http://10.101.1.190:3129"}
        image = requests.get(url, verify=False, proxies=proxies, timeout=3,
                             headers=headers).content
        # Cleanup: the original round-tripped the bytes through
        # io.BytesIO().read() just to take len(); len(image) is identical.
        return round(len(image) / 1e3)
    except Exception as err:
        print('[-]Get "physical size" of picture failed: ', err.__class__.__name__)


# if __name__ == '__main__':
#     imgurl = "https://szlkst.com/images/zwgk_1tpxw201807W020180717334569853685.jpg"
#     lpath = download_picture_to_local(imgurl)
#     print(lpath)