import os

from lxml import etree
import hashlib
from loguru import logger
import re
from urllib.parse import urljoin, urlparse, urlunparse, urlencode
from posixpath import normpath


# Generic content filter: extract plain text from an HTML fragment.
def data_filter(con: str, _id=None):
    """Extract the visible text of an HTML fragment, collapsing all runs
    of whitespace into single spaces.

    :param con: raw HTML/text content to clean.
    :param _id: optional identifier used only for log messages.
    :return: the cleaned text; '' when the input (or the extracted text)
             is empty/whitespace-only; False when parsing raises.
    """
    # Explicit guard instead of `assert` — asserts are stripped under
    # `python -O`, which would silently change this function's behavior.
    if not con or not con.strip():
        return ""
    try:
        # string(.) concatenates all text nodes of the parsed document.
        text = etree.HTML(text=con).xpath('string(.)')
        escape_con = re.sub(r'\s+', ' ', text)
        # A tags-only fragment can still yield pure whitespace.
        return escape_con if escape_con.strip() else ''
    except Exception as e:
        logger.exception(f'过滤发生异常 ID: {_id}  MSG: {e}')
        return False


# Generic helper: resolve a possibly-relative URL against a base URL.
def url_replenish(url, base):
    """Resolve *url* against *base* and normalize the path component
    (collapsing '.' and '..' segments), keeping query and fragment.

    :param url: absolute or relative URL; falsy values yield ''.
    :param base: base URL used for resolution.
    :return: the absolute, path-normalized URL as a string.
    """
    if not url:
        return ''
    resolved = urlparse(urljoin(base, url))
    # normpath('') returns '.', which would corrupt URLs that have no
    # path at all (e.g. 'https://example.com' -> 'https://example.com/.'),
    # so only normalize a non-empty path.
    path = normpath(resolved.path) if resolved.path else ''
    return urlunparse((resolved.scheme, resolved.netloc, path,
                       resolved.params, resolved.query, resolved.fragment))


def create_md5(md_str):
    """Return the hexadecimal MD5 digest of *md_str* (UTF-8 encoded)."""
    digest = hashlib.md5(md_str.encode('utf-8'))
    return digest.hexdigest()


def make_dir(path):
    """Create the download directory *path* if it does not already exist,
    logging the outcome.

    :param path: directory path to ensure exists.
    """
    if not os.path.exists(path):
        logger.error("下载文件不存在！")
        try:
            # exist_ok=True closes the race where the directory is created
            # between the exists() check above and this call (TOCTOU).
            os.makedirs(path, exist_ok=True)
            logger.success("下载文件夹创建成功！")
        except Exception as e:
            logger.error(f"下载文件夹创建失败:{e}！")

if __name__ == '__main__':
    # Quick manual check of URL resolution against a real-world example.
    sample = url_replenish("/2023/0530/c29187a2765721/page.htm", "https://gxtgw.zju.edu.cn/29187/list.htm")
    print(sample)