import json
from trafilatura.spider import focused_crawler
from trafilatura import sitemaps
from tqdm import tqdm

# Crawl limits passed to trafilatura's focused_crawler.
MAX_SEEN_URLS = 100
MAX_KNOWN_URLS = 10000
# Backward-compatible alias: the constant was originally misspelled and is
# still referenced under this name elsewhere in the script.
MAX_KNWON_URLS = MAX_KNOWN_URLS


def read_urls(file_path):
    """Load and return a list of URLs from a JSON file.

    Args:
        file_path: Path to a JSON file containing an array of URL strings.

    Returns:
        The deserialized JSON content (expected to be a list of URLs).
    """
    # Use explicit UTF-8 to match write_urls and avoid platform-default
    # encoding surprises.
    with open(file_path, "r", encoding="utf-8") as file:
        return json.load(file)


def write_urls(file_path, _set):
    """Persist a collection of URLs to *file_path* as a sorted JSON array."""
    with open(file_path, "w", encoding="utf-8") as sink:
        # Sorting gives deterministic output; indent + ensure_ascii=False
        # keeps the file human-readable.
        json.dump(sorted(_set), sink, ensure_ascii=False, indent=4)


if __name__ == "__main__":
    import os

    # Deduplicate and sort the seed URLs for a deterministic crawl order.
    original_urls = read_urls("./urls_1000.json")
    seed_urls = sorted(set(original_urls))
    known_urls = set()
    # Ensure the output directory exists before the per-seed writes below.
    os.makedirs("./urls", exist_ok=True)
    # Collect urls for each seed url
    for _id, _url in enumerate(tqdm(seed_urls)):
        print(f"_________________ Processing: url_{_id} _________________")
        print(f"URL: {_url}")
        # Harvest URLs listed in the site's sitemap (English pages only).
        sm_urls = sitemaps.sitemap_search(_url, target_lang="en")
        print(f"Sitemap urls: {len(sm_urls)}")
        # Crawl internal links starting from the seed, bounded by the
        # module-level limits.
        _to_visit, _known_urls = focused_crawler(
            _url,
            max_seen_urls=MAX_SEEN_URLS,
            max_known_urls=MAX_KNWON_URLS,
        )
        print(f"Other urls: {len(_known_urls)}")
        _known_urls.update(sm_urls)
        # Record the results for each url
        write_urls(f"./urls/url_{_id}.json", _known_urls)
        # Accumulate in place instead of rebuilding the set each iteration.
        known_urls.update(_known_urls)
        print(f"{len(known_urls)} URLs collected.")
    write_urls("./urls/url_all.json", known_urls)
