#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time: 2019/5/31  17:14
# @Author: 余浪人
# @Email:yulangren520@Gmail.com

import os
import re

from spider.crawler import download_get


def save_img(html: str, path='images/'):
    """Localize remote images referenced in an HTML document.

    Every ``<img ... src="...">`` URL found in *html* is downloaded and
    saved under ``<app>/static/<Config.FILE_PATH>/<path>/<date>/<uuid>.<ext>``,
    and the URL inside the HTML is rewritten to the relative local path.
    Processing is best-effort: any image that cannot be saved (directory
    creation fails, no write permission, download/write error) keeps its
    original remote URL.

    :param html: HTML response text to process.
    :param path: sub-directory (under the configured file path) to save into.
    :return: the HTML with successfully localized image URLs rewritten;
             returned unchanged when it contains no images.
    """
    # Non-greedy '.*?' so we don't skip past the intended src attribute
    # when a line contains multiple tags/attributes.
    pic_urls = re.findall(r'<img.*?src="(.*?)"', html)
    # Loop-invariant: compute the application base directory once.
    base_path = os.path.join(get_sys_uri(), 'apps')
    for url in pic_urls:
        # Unique local filename, preserving the original file extension.
        relative_path = os.path.join(
            Config.FILE_PATH, path, get_date_str(),
            get_uuid_16() + '.' + url.split('.')[-1])
        abs_path = os.path.join(base_path, 'static', relative_path)
        dirname = os.path.dirname(abs_path)
        try:
            # exist_ok avoids the racy exists()-then-makedirs() check.
            os.makedirs(dirname, exist_ok=True)
        except OSError:
            continue  # cannot create target directory; keep remote URL
        if not os.access(dirname, os.W_OK):
            continue  # no write permission; keep remote URL
        try:
            response = download_get(url).content
            # Write to abs_path — the same path whose directory we created.
            with open(abs_path, 'wb') as fp:
                fp.write(response)
        except Exception:
            continue  # download/save failed (best-effort); keep remote URL
        # Plain substring replace: the URL may contain regex metacharacters
        # ('.', '?', '+'), so re.sub on the raw URL would be unsafe.
        # Replacing into `html` itself accumulates all substitutions.
        html = html.replace(url, relative_path)
    return html

