'''Extract a clean, regular list of URLs from files containing many http addresses.'''
import charset_normalizer


def check_charset(file_path):
    """Detect the text encoding of *file_path*.

    Reads a sample of the file in binary mode and runs
    charset_normalizer's detector over it.

    Args:
        file_path: Path of the file whose encoding should be probed.

    Returns:
        The detected encoding name (str), or None when detection fails.
    """
    with open(file_path, 'rb') as f:
        # Bug fix: the original read only 4 bytes, which is only enough
        # to catch a BOM — statistical charset detection needs a real
        # sample. Read up to 64 KiB so non-ASCII content (e.g. GBK/UTF-8
        # Chinese text) is detected reliably.
        data = f.read(65536)
    return charset_normalizer.detect(data)['encoding']


def getUrls(filePath):
    """Read one URL per line from *filePath* and normalize each entry.

    Blank (or whitespace-only) lines are skipped; any entry that does
    not already start with "http" gets an "http://" prefix prepended.

    Args:
        filePath: Path of the text file holding the URLs.

    Returns:
        list[str]: the normalized URLs, in file order.
    """
    with open(filePath, encoding=check_charset(filePath)) as f:
        entries = [line.strip() for line in f if line.strip()]
    normalized = []
    for entry in entries:
        if not entry.startswith("http"):
            entry = "http://" + entry
        normalized.append(entry)
    return normalized


def get_urls(file_path):
    """Extract unique URLs from a file whose URLs may be run together.

    The source files sometimes concatenate or wrap URLs without clean
    separators, so the whole content is joined into one string and then
    re-split on the "http://" scheme marker to recover the individual
    addresses.

    Args:
        file_path: Path of the text file holding the URLs.

    Returns:
        list[str]: the unique URLs, in first-seen order.
    """
    with open(file_path, encoding=check_charset(file_path)) as f:
        # Join all lines into one string: per-line parsing is unreliable
        # because of the file's formatting.
        blob = ''.join(line.strip() for line in f)
    # Split on the scheme marker and restore it; empty fragments (from a
    # leading or repeated marker) are dropped.
    url_li = ["http://" + part for part in blob.split('http://') if part]
    # Bug fix: the original printed the pre-dedup count but returned
    # list(set(url_li)) — so the reported number could exceed what was
    # returned, and the order was non-deterministic. dict.fromkeys
    # deduplicates while keeping first-seen order.
    unique_urls = list(dict.fromkeys(url_li))
    print(f"一共需要抓取{len(unique_urls)}个网址")
    return unique_urls
