from urllib.parse import urljoin
from urllib.parse import urlparse
from urllib.parse import urlunparse
from posixpath import normpath
import os
import uuid
import requests
from bs4 import BeautifulSoup
import chardet
import hashlib

def url_join(base, url):
    """
    Resolve a (possibly relative) image URL against a base URL.

    ``urljoin`` alone leaves dot-segments ("." / "..") in place when *url*
    is already absolute, so the path component is additionally normalised
    with ``posixpath.normpath``. Note that normpath also drops a trailing
    slash from the path.

    :param base: base URL the page was fetched from
    :param url: image URL, relative or absolute
    :return: the resolved, normalised absolute URL
    """
    resolved = urljoin(base, url)
    parts = urlparse(resolved)
    # Bug fix: normpath('') yields '.', which turned 'http://host' into
    # 'http://host/.' — only normalise a non-empty path.
    path = normpath(parts.path) if parts.path else parts.path
    return urlunparse((parts.scheme, parts.netloc, path,
                       parts.params, parts.query, parts.fragment))

def get_file_size(filePath):
    """
    Return the size of a file in kilobytes.

    :param filePath: path of the file to measure
    :return: size in KB, rounded to two decimal places
    """
    size_kb = os.path.getsize(filePath) / 1024.0
    return round(size_kb, 2)

def gen_uuid():
    """
    Generate a unique identifier.

    :return: 32-character lowercase hex string derived from a version-1 UUID
             (same value as ``str(uuid)`` with the dashes removed)
    """
    return uuid.uuid1().hex

def get_web_title(link, timeout=10):
    """
    Fetch a web page and return its <title> text with all whitespace removed.

    The raw response bytes are run through chardet so the body is decoded
    with the detected encoding before parsing.

    :param link: URL of the page to fetch
    :param timeout: request timeout in seconds (new, backward-compatible;
        the original call had no timeout and could hang indefinitely)
    :return: title text stripped of newlines, carriage returns, tabs and
        spaces; empty string if the page has no <title> tag
    :raises requests.RequestException: on network failure or timeout
    """
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
    res = requests.get(url=link, headers=headers, timeout=timeout)
    res.encoding = get_encode_format(res.content)
    soup = BeautifulSoup(res.text, 'lxml')
    # Guard: pages without a <title> tag made the original raise AttributeError.
    if soup.title is None:
        return ''
    title = soup.title.text
    # Remove every whitespace-like character, including plain spaces —
    # intended behavior, presumably because the scraped titles are CJK text.
    for key in ('\n', '\r', '\t', ' '):
        title = title.replace(key, '')
    return title

def get_encode_format(html):
    """
    Detect the character encoding of raw HTML bytes.

    :param html: page content as bytes
    :return: encoding name reported by chardet, or None when undetectable
    """
    return chardet.detect(html).get('encoding')

def md5(s):
    """
    Return the hexadecimal MD5 digest of *s*.

    Generalised to accept either ``bytes`` or ``str`` (encoded as UTF-8),
    so callers no longer have to call ``.encode()`` themselves; passing
    bytes behaves exactly as before.

    :param s: data to hash, as bytes or str
    :return: 32-character lowercase hex digest
    """
    if isinstance(s, str):
        s = s.encode('utf-8')
    return hashlib.md5(s).hexdigest()
if __name__ == '__main__':
    # Quick manual check: print the MD5 digest of a sample URL.
    demo_url = 'https://music.163.com/#/discover/toplist?id=180106'
    print(md5(demo_url.encode()))
