# 1. Find all URLs on the page  x
# 2. Filter out the pages whose URL matches the regex and save them locally  x
# 3. Filter out all static resources  x
# 4. Filter out static-resource URLs that do not belong to the target site x
# 5. Filter out the URLs under this site x
# 6. Rewrite static URLs from other sites to paths starting with "others" and replace their original URLs in the page
import re
from urllib.parse import urljoin, urlparse, unquote

from utils.utils import request_get, get_path_ins, create_file

from utils.tools import url_reg, static_url_reg

# Base directory of the current run (project-local helper wraps it as a path object).
run_path = get_path_ins('.')
# Local mirror root: every fetched page/asset is written under temp_data/<hostname>/...
data_path = run_path.joinpath('temp_data')


def create_url_path_file(target_url, is_other=False):
    """Download ``target_url`` into the local mirror tree and return its Path.

    The file lands at ``data_path/<hostname>/<url path>``; when ``is_other``
    is true the path is prefixed with ``others/`` to segregate third-party
    assets. An existing file is never re-downloaded.

    :param target_url: absolute URL to fetch.
    :param is_other: store under the ``others/`` prefix (foreign-domain asset).
    :return: pathlib Path of the (possibly just-written) local file.
    """
    url_ins = urlparse(target_url)
    url_path = unquote(url_ins.path)
    if is_other:
        url_path = '/others/' + url_path
    # Split the URL path into components so joinpath builds a nested directory tree.
    url_path_file = data_path.joinpath(url_ins.hostname, *url_path[1:].split('/'))

    if not url_path_file.exists():
        url_path_file.parent.mkdir(parents=True, exist_ok=True)
        try:
            content = request_get(target_url).content
        except Exception as e:
            # Best-effort download: log the failure and still write an empty
            # file so callers that read the path back do not crash.
            print(e)
            content = b''
        url_path_file.write_bytes(content)

    return url_path_file


def find_all_url(base_url, filter_url_list=None):
    """Return the URLs found in the page at ``base_url``.

    The page is fetched (or read from the local mirror) via
    ``create_url_path_file``; every regex match is resolved against
    ``base_url`` with ``urljoin``. URLs already present in
    ``filter_url_list`` are skipped, and duplicates are removed while
    preserving first-seen order.

    :param base_url: absolute URL of the page to scan.
    :param filter_url_list: URLs to exclude from the result (optional).
    :return: list of absolute URLs, deduplicated, insertion-ordered.
    """
    if filter_url_list is None:
        filter_url_list = []

    temp_file = create_url_path_file(base_url)
    resp_string = temp_file.read_text(encoding='utf-8')

    # Seen-set gives O(1) membership instead of scanning the result list.
    seen = set(filter_url_list)
    result_url_list = []
    for url_item in set(url_reg.findall(resp_string)):
        # findall may yield tuples when the pattern has groups; join flattens both cases.
        full_url_item = urljoin(base_url, ''.join(url_item))
        if full_url_item in seen:
            continue
        seen.add(full_url_item)
        result_url_list.append(full_url_item)

    return result_url_list


def find_all_static_url(base_url, filter_url_list):
    """Collect static-resource URLs reachable from ``base_url``.

    URLs matching ``static_url_reg`` are gathered; ``.js``/``.css`` files
    are fetched and scanned recursively since they can reference further
    assets. Visited URLs are threaded into every recursive call — the
    original code only consulted the caller-supplied filter, so two
    css/js files referencing each other recursed forever.

    :param base_url: page or asset URL whose content is scanned.
    :param filter_url_list: URLs already collected elsewhere; excluded here.
    :return: list of newly discovered absolute static URLs.
    """
    result_static_url_list = []
    seen = set(filter_url_list)

    for url_item in filter(static_url_reg.search, find_all_url(base_url)):
        # find_all_url already returns absolute URLs, but urljoin is kept
        # as a cheap normalisation against relative leftovers.
        full_url_item = urljoin(base_url, url_item)
        if full_url_item in seen:
            continue
        seen.add(full_url_item)
        result_static_url_list.append(full_url_item)

        # js/css files may pull in more static assets — recurse with the
        # up-to-date seen set so cycles terminate.
        if re.search(r'\.(js|css)$', full_url_item):
            for nested_url in find_all_static_url(full_url_item, list(seen)):
                if nested_url not in seen:
                    seen.add(nested_url)
                    result_static_url_list.append(nested_url)

    return result_static_url_list


def master(base_url: str):
    """Mirror the site starting at ``base_url``.

    Breadth of work: crawl every reachable ``.html`` page, collect all
    static resources, download same-domain assets in place and foreign
    assets under an ``others/`` prefix.

    :param base_url: entry-point URL, expected to end in ``.html``.
    """
    past_page_url_list = []
    past_static_url_list = []

    raw_all_url_list = find_all_url(base_url)
    page_url_reg = re.compile(r'\.html$')
    page_url_list = list(filter(page_url_reg.search, raw_all_url_list))

    if not page_url_list:
        print("请检查url是否以.html结尾，且无对应的文件。")

    # Worklist crawl: pop a page, record it, enqueue its unseen .html links,
    # and accumulate the static resources it references.
    while page_url_list:
        page_url = page_url_list.pop()
        if page_url in past_page_url_list:
            continue

        past_page_url_list.append(page_url)
        sub_page_url_list = filter(page_url_reg.search, find_all_url(page_url, past_page_url_list))
        page_url_list.extend([page_url for page_url in sub_page_url_list
                              if page_url not in page_url_list])

        sub_all_static_url_list = find_all_static_url(page_url, past_static_url_list)
        past_static_url_list.extend(sub_all_static_url_list)

    # Second-level label of the host, e.g. 'chimoph' from 'www.chimoph.com'.
    # Guard against dotless hosts (e.g. 'localhost'), where [-2] would raise IndexError.
    host_parts = urlparse(base_url).hostname.split('.')
    domain = host_parts[-2] if len(host_parts) >= 2 else host_parts[-1]
    domain_static_url_list = []
    other_domain_static_url_list = []

    # Split the collected static resources into target-domain vs. foreign-domain.
    for static_url in past_static_url_list:
        if domain in static_url:
            domain_static_url_list.append(static_url)
        else:
            other_domain_static_url_list.append(static_url)

    # Download static resources under the target domain.
    for static_url in domain_static_url_list:
        create_url_path_file(static_url)

    # Download static resources from other domains (stored under others/).
    for other_static_url in other_domain_static_url_list:
        url_path_file = create_url_path_file(other_static_url, is_other=True)
        # TODO: rewrite the links inside the saved HTML to point at the local copies.
        print(url_path_file)


if __name__ == '__main__':
    # Entry point: mirror the demo site starting from its index page.
    entry_url = 'http://www.chimoph.com/index.html'
    master(entry_url)
