#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time: 2019/6/4  20:54
# @Author: 余浪人
# @email: yulangren520@gmail.com

from apps import get_sys_uri
from apps.lib.public import get_date_str, get_uuid_16
import os, re
from apps.setting import Config
from apps.spiders.crawler import download_get


def save_webImg(url, rul_obj, path):
    """
    Download a web image and store it under the local static-file tree.

    :param url: remote image URL; the file extension is taken from the
                last '.'-separated segment of the URL.
    :param rul_obj: opaque request/config object forwarded to ``download_get``.
    :param path: sub-directory (under ``Config.FILE_PATH``) to save into.
    :return: site-relative URL (``'/<relative_path>'``) on success,
             ``None`` on any failure (directory, permission, or download error).
    """
    error = ''
    base_path = os.path.join(get_sys_uri(), 'apps')
    # Layout: <FILE_PATH>/<path>/<date>/<uuid16>.<ext>
    relative_path = os.path.join(Config.FILE_PATH, path, get_date_str(), get_uuid_16() + '.' + url.split('.')[-1])
    abs_path = (base_path + '/static/' + relative_path).replace('\\', '/')
    dirName = os.path.dirname(abs_path)
    if not os.path.exists(dirName):
        try:
            os.makedirs(dirName)
        except OSError:  # was a bare except: only filesystem errors are expected here
            error = '创建目录失败!'
    elif not os.access(dirName, os.W_OK):
        error = '没有写入权限!'
    if error:
        return None
    try:
        response = download_get(url, rul_obj)
        # 'with' closes the file on exit; the original's explicit close() was redundant.
        with open(abs_path, 'wb') as fp:
            fp.write(response.content)
    except Exception:
        # Download or write failed: return None instead of a dead link.
        # (The original swallowed the error and still returned the path.)
        return None
    return '/' + relative_path


def save_img(html: str, rul_obj: int, path='images/', goal_url=None):
    """
    Localize every image referenced by ``<img ... src="...">`` in an HTML fragment.

    :param html: HTML text to scan for image tags.
    :param rul_obj: opaque request/config object forwarded to ``save_webImg``.
    :param path: sub-directory for the downloaded images.
    :param goal_url: base URL prepended to relative src values; ``None`` means
                     the src values are already absolute.
    :return: the HTML with each successfully saved image src replaced by its
             local URL; failed downloads keep their original remote src.
    """
    # Raw string avoids the invalid-escape warning the original pattern produced.
    pic_urls = re.findall(r'<img.*src="(.*?\..*?)"', html)
    content = html
    for base_url in pic_urls:
        # Prefix the base URL when the src is a relative address.
        url = goal_url + base_url if goal_url else base_url
        local = save_webImg(url, rul_obj, path)
        if local is None:
            # save_webImg failed (dir/permission/download error); the original
            # crashed with TypeError here — skip and keep the remote src instead.
            continue
        content = content.replace(base_url, local.replace('\\', '/'))
    return content


