import json
from trafilatura.spider import focused_crawler
from trafilatura import sitemaps
import traceback
from dsuc import get_urls
from tqdm import tqdm

# Crawl limits handed to trafilatura.focused_crawler for each seed URL.
MAX_SEEN_URLS = 100
# NOTE(review): "KNWON" is a typo for "KNOWN" — kept as-is because the name is
# referenced in the __main__ block below; rename both together if cleaning up.
MAX_KNWON_URLS = 10000
# Input JSON file containing the list of seed URLs to crawl.
URL_FILE = "./urls_1000.json"


def read_urls(file_path):
    """Load and return the JSON payload (the seed-URL collection) stored at *file_path*.

    Raises FileNotFoundError if the file is missing and json.JSONDecodeError
    if the contents are not valid JSON.
    """
    # Explicit UTF-8 for consistency with write_urls, which always writes UTF-8;
    # the original relied on the platform default encoding.
    with open(file_path, "r", encoding="utf-8") as file:
        return json.load(file)


def write_urls(file_path, _set, _to_dict=False):
    """Write the members of *_set* to *file_path* as sorted, pretty-printed JSON.

    Parameters:
        file_path: destination path for the JSON file.
        _set: any iterable of URLs (typically a set); sorted before writing.
        _to_dict: when True, write a {index: url} mapping instead of a list
                  (JSON serialization turns the int indices into string keys).
    """
    _list = sorted(_set)
    # Build the payload once so there is a single write path below
    # (the original duplicated the open/dump block in each branch).
    payload = dict(enumerate(_list)) if _to_dict else _list
    with open(file_path, "w", encoding="utf-8") as f:
        json.dump(payload, f, ensure_ascii=False, indent=4)


if __name__ == "__main__":
    try:
        # De-duplicate and sort the seeds so runs are reproducible and each
        # url_{_id}.json maps to a stable seed.
        original_urls = read_urls(URL_FILE)
        seed_urls = sorted(set(original_urls))
        # NOTE(review): assumes the ./urls/ directory already exists — confirm,
        # or create it up front with os.makedirs("./urls", exist_ok=True).
        write_urls("./urls/url_list.json", seed_urls, True)
        # Collect urls for each seed url, merging three discovery sources:
        # sitemap entries, internal links (dsuc.get_urls), and a focused crawl.
        for _id, _url in enumerate(tqdm(seed_urls)):
            print(f"\n\n_________________ Processing: url_{_id} _________________\n\n")
            print(f"\n\nURL: {_url}\n\n")
            # If the website has a complete sitemap
            sm_urls = sitemaps.sitemap_search(_url, target_lang="en")
            print(f"\n\nSitemap urls: {len(sm_urls)}\n\n")
            int_urls = get_urls(_url, deepcrawl=True, fuzzable=True, external=True)
            print(f"\n\nInternal urls: {len(int_urls)}\n\n")
            # Single focused-crawl pass; a previous two-pass variant (seeding a
            # second call with todo/known_links from a first) was abandoned.
            _to_visit, _known_urls = focused_crawler(
                _url,
                max_seen_urls=MAX_SEEN_URLS,
                max_known_urls=MAX_KNWON_URLS,
                lang="en",
            )
            print(f"\n\nOther urls: {len(_known_urls)}\n\n")
            # Fold the sitemap and internal-link results into the crawl set.
            _known_urls.update(sm_urls)
            _known_urls.update(int_urls)
            # Record the results for each url
            write_urls(f"./urls/url_{_id}.json", _known_urls)
    except Exception:
        # Top-level boundary: print the full traceback so failures are visible
        # without aborting the surrounding harness mid-batch.
        print(traceback.format_exc())
