import codecs
import os

import config_setting
import util_hash
import util_path
import util_snapshot
import util_store


def scan_inbox(inbox_folder_path, output_path="./step_3_2_output/all_in_one"):
    """Scan *inbox_folder_path* for downloaded ``.html`` snapshot files.

    For each html file found, read the first 2000 characters, extract the
    embedded source URL, normalize a leading ``http://`` to ``https://``,
    and append the URL plus a title derived from the file name to
    ``downloaded_reference_list.txt`` under *output_path*.

    :param inbox_folder_path: folder to walk for ``*.html`` files.
    :param output_path: folder receiving ``downloaded_reference_list.txt``.
        Default matches the script's output folder; previously this name
        was read from a module-level global that only exists when the file
        runs as a script, so importing and calling this function raised
        ``NameError``.
    :return: list of the normalized URLs found, in traversal order.
    """
    print(f"scanning {inbox_folder_path}...")
    done_url_list = []
    for dirpath, filename in util_path.travel_file(
            inbox_folder_path,
            lambda dirpath, filename: filename.endswith(".html")):
        full_file_path = os.path.join(dirpath, filename).replace("\\", "/")
        # The embedded URL lives near the top of the snapshot file, so a
        # 2000-character head read is enough — avoids loading whole files.
        with codecs.open(full_file_path, mode='r', encoding='utf-8') as f:
            content_head = f.read(2000)
        url_embedded = util_snapshot.getUrl_from_content(content_head).strip()
        # Rewrite only the scheme prefix; str.replace would also rewrite any
        # later "http://" occurrence inside the URL (e.g. in a query param).
        if url_embedded.startswith("http://"):
            url_embedded = "https://" + url_embedded[len("http://"):]
        title_from_path = util_snapshot.getTitle_from_path(os.path.basename(full_file_path))
        util_store.append_url_with_subsidiary(
            os.path.join(output_path, "downloaded_reference_list.txt"),
            url_embedded, title_from_path)
        done_url_list.append(url_embedded)
    return done_url_list


# def scan_asset(asset_folder_path):
#     print(f"scanning {asset_folder_path}")
#     count = 0
#     for dirpath, dirnames, filenames in os.walk(asset_folder_path):
#         for filename in filenames:
#             relative_path = dirpath.replace(asset_folder_path, "").lstrip("\\").lstrip("/")  # strip the leading path
#             if filename.strip().endswith("url_list.txt"):
#                 dedupe_url_list = util_store.read_url_list(os.path.join(dirpath, filename))
#                 dedupe_url_list = [url.replace("http://", "https://") if url.startswith("http://") else url for url in dedupe_url_list]
#                 yield dirpath, dedupe_url_list


def yield_url_list_to_snapshot(vault_path):
    """Yield ``(dirpath, url_list)`` for each ``url_list.txt`` under *vault_path*.

    Walks the vault tree recursively; every file named exactly
    ``url_list.txt`` (after stripping surrounding whitespace) is read via
    ``util_store.read_url_list`` and each URL with a leading ``http://``
    is normalized to ``https://``.

    :param vault_path: root folder of the web-archives vault to walk.
    :return: generator of ``(dirpath, list_of_normalized_urls)`` tuples.
    """
    for dirpath, dirnames, filenames in os.walk(vault_path):
        for filename in filenames:
            # Guard clause keeps the interesting path un-nested.
            if filename.strip() != "url_list.txt":
                continue
            url_list = util_store.read_url_list(os.path.join(dirpath, filename))
            # Rewrite only the scheme prefix; str.replace would also touch
            # any later "http://" occurrence inside the URL itself.
            url_list = ["https://" + url[len("http://"):] if url.startswith("http://") else url
                        for url in url_list]
            yield dirpath, url_list


if __name__ == "__main__":
    # Folder of already-downloaded snapshot html files (the DONE side).
    inbox_folder_path = "E:/inbox/20231006_allinone"
    # Vault root whose per-topic url_list.txt files form the TODO side.
    url_todo_list_root_folder_path = "E:/WebArchivesVault"
    output_path = "./step_3_2_output/all_in_one"
    # makedirs(exist_ok=True) also creates the missing parent folder;
    # os.mkdir would raise FileNotFoundError if "./step_3_2_output"
    # did not exist yet.
    os.makedirs(output_path, exist_ok=True)

    done_url_list = scan_inbox(inbox_folder_path)
    todo_url_list = []
    for dir_path, url_todo_list in yield_url_list_to_snapshot(url_todo_list_root_folder_path):
        print(f"start path: {dir_path}...")
        todo_url_list.extend(url_todo_list)
    url_ignore_list_file_path = os.path.join("./", "ignore_url_list.txt")
    ignore_url_list = util_store.read_url_list(url_ignore_list_file_path)

    # URLs still needing a snapshot = todo minus already-downloaded minus ignored.
    difference = list(set(todo_url_list) - set(done_url_list) - set(ignore_url_list))

    print(difference)
    util_store.write_url_list(os.path.join(output_path, "difference_url_list.txt"), difference)