import os
import sys
import time
import webbrowser
import config_setting, util_store
from logger import Logger


def yield_url_list_to_snapshot(vault_path):
    """Walk *vault_path* recursively and yield ``(dirpath, url_list)`` for
    every directory containing a file whose name ends with ``url_list.txt``.

    The URL list is loaded via ``util_store.read_url_list`` (which replaced
    the inline parse/filter/dedupe logic that used to live here — presumably
    it still strips ``#`` comments and de-duplicates; confirm in util_store).

    :param vault_path: root folder to scan.
    :yields: tuples of (directory path, list of URLs to snapshot).
    """
    for dirpath, _dirnames, filenames in os.walk(vault_path):
        for filename in filenames:
            # Match e.g. "url_list.txt", "zhihu_url_list.txt", etc.
            if filename.strip().endswith("url_list.txt"):
                yield dirpath, util_store.read_url_list(os.path.join(dirpath, filename))


def open_in_browse_and_yield_done_url(url_todo_list, url_skip_list, delay=10):
    """Open each URL from *url_todo_list* (unless skipped) in the default
    browser and yield it once opened.

    :param url_todo_list: URLs to open, in order.
    :param url_skip_list: URLs to skip (already done or explicitly ignored).
    :param delay: seconds to pause after opening each URL so the browser
        extensions have time to take the snapshot (default 10, as before).
    :yields: each URL that was actually opened.
    """
    # Build the skip set once: O(1) membership instead of an O(n) list
    # scan per URL.
    skip_set = set(url_skip_list)
    opened_count = 0
    skip_count = 0
    print(f"start to scrape, total {len(url_todo_list)}")
    for url in url_todo_list:
        if url in skip_set:
            skip_count += 1
            print(f"skipped {skip_count} {url}")
            continue
        # Counter is printed before incrementing (first URL prints 0),
        # matching the original progress output.
        print(f"{opened_count} opening {url}...")
        webbrowser.open(url)
        opened_count += 1
        time.sleep(delay)
        yield url


def start(url_todo_list_root_folder_path):
    """Open every not-yet-done URL found under *url_todo_list_root_folder_path*
    in the default browser, recording progress per directory.

    Reads two global lists from the vault root:
      * ignore_url_list.txt — URLs explicitly marked to be skipped.
      * done_snapshot_url_list.txt — URLs already snapshotted.
    Each opened URL is appended to resume_breakpoint_url_list.txt in its own
    directory so an interrupted run can be resumed.
    """
    url_ignore_list_file_path = os.path.join(config_setting.vault_root_folder_path, "ignore_url_list.txt")  # ./ignore_url_list.txt
    url_done_list_file_path = os.path.join(config_setting.vault_root_folder_path, "done_snapshot_url_list.txt")  # ./done_snapshot_url_list.txt

    url_ignore_list = util_store.read_url_list(url_ignore_list_file_path)
    url_snapshot_done_list = util_store.read_url_list(url_done_list_file_path)
    # The skip list only depends on the two global lists above, so build it
    # once instead of rebuilding it for every directory (it was loop-invariant).
    url_skip_list = list(set(url_snapshot_done_list + url_ignore_list))

    for dir_path, url_todo_list in yield_url_list_to_snapshot(url_todo_list_root_folder_path):
        print(f"start path: {dir_path}...")
        for done_url in open_in_browse_and_yield_done_url(url_todo_list, url_skip_list):
            # Append immediately after each URL is opened so a crash or
            # Ctrl-C loses at most the current URL.
            util_store.append_url(os.path.join(dir_path, "resume_breakpoint_url_list.txt"), done_url)
        print(f"done path: {dir_path}")


if __name__ == "__main__":
    # NOTE: this script does NOT take snapshots itself.  It only opens one URL
    # from each todo list in the default browser every 10 seconds; the actual
    # snapshot is taken by the SingleFile + selfscroll browser extensions.
    #
    # Inputs:
    #   * url_todo_list_root_folder_path: root folder whose sub-folders are
    #     scanned for files ending in "url_list.txt".
    #   * ignore_url_list.txt / done_snapshot_url_list.txt under the vault root
    #     (see start()).
    # Outputs:
    #   * Snapshots land wherever the SingleFile extension is configured to
    #     save them.
    #   * Each sub-folder gets a resume_breakpoint_url_list.txt recording the
    #     URLs that were opened, for resuming an interrupted run.
    sys.stdout = Logger()  # redirect stdout through the project's Logger — presumably tees to a log file; see logger.py
    url_todo_list_root_folder_path = f"{config_setting.inbox_root_folder_path}/{config_setting.batch_name}"  # e.g. ./assets/20240502
    start(url_todo_list_root_folder_path)