import os
import re
from urllib.parse import urlparse
import hashlib
import requests
import time

from script.utils.download_utils import *


def hash_str(text):
    """Return the hex SHA-1 digest of *text*.

    Accepts either ``str`` or ``bytes``. The original implementation passed
    *text* straight to ``hashlib.sha1``, which raises ``TypeError`` for
    ``str`` input — strings must be encoded to bytes first.
    """
    if isinstance(text, str):
        # hashlib digests operate on bytes; encode str input as UTF-8.
        text = text.encode("utf-8")
    return hashlib.sha1(text).hexdigest()


def get_host(url):
    """Return the network location (host[:port]) part of *url*."""
    return urlparse(url).netloc


def get_url_file_path(url):
    """Return the path portion of *url*, starting at the first ``/`` after the host.

    The original stripped ``"://"`` and called ``str.index("/")``, which raises
    ``ValueError`` for URLs with no path (e.g. ``"http://example.com"``).
    Now returns ``"/"`` for such URLs. Query strings and fragments are kept,
    matching the original behavior for URLs that did have a path.
    """
    stripped = url.replace("://", "")
    slash = stripped.find("/")
    if slash == -1:
        # No path component after the host; treat as the site root.
        return "/"
    return stripped[slash:]


def get_all_static_file(index_file_path):
    """Download the static assets referenced by the saved index page.

    TODO: not implemented yet — currently a no-op stub. Called by
    download_static_file() after the index page has been saved to
    *index_file_path*; presumably it should parse that HTML and fetch the
    referenced static resources (verify intended behavior when implementing).
    """
    pass


# Parameters: user directory, URL to clone.
def download_static_file(download_dir, url):
    """Save *url* as index.html under *download_dir*, then collect its static assets.

    Ensures a ``data`` subdirectory exists under *download_dir*; if the page
    downloads successfully, hands the saved index file to
    ``get_all_static_file`` for asset collection.
    """
    asset_dir = os.path.join(download_dir, "data")
    os.makedirs(asset_dir, exist_ok=True)
    index_path = os.path.join(download_dir, "index.html")
    if download_to_file_path(index_path, url):
        get_all_static_file(index_path)


def backup_webpage(url):
    """Create a backup of the web page at *url*.

    TODO: not implemented yet — currently a no-op stub. Likely intended to
    derive a storage directory (e.g. from hash_str/get_host) and delegate to
    download_static_file(); confirm the intended flow before implementing.
    """
    pass

