import os
import sys

import fire
from tqdm import tqdm

project_dir_path = os.path.join(os.path.dirname(__file__), "../../..")
sys.path.append(project_dir_path)

from scripts.datasets.presets import load_preset_text_dataset, load_preset_text_tokenizer


def scan_tokens_ratio(
    text_dataset_name: str = "fwe10bt",
    text_dataset_split_name: str = 'train',
    text_tokenizer_name0: str = "utf8",
    text_tokenizer_name1: str = "llama3",
) -> None:
    """Scan a text dataset and report the ratio of token counts between two tokenizers.

    Encodes every text in the dataset with both preset tokenizers, accumulates the
    total token counts, and shows the running ratio (tokenizer0 / tokenizer1) on a
    tqdm progress bar, printing a final summary when the scan completes.

    Args:
        text_dataset_name: Preset name of the text dataset to scan.
        text_dataset_split_name: Split of the dataset to iterate (e.g. 'train').
        text_tokenizer_name0: Preset name of the numerator tokenizer.
        text_tokenizer_name1: Preset name of the denominator tokenizer.
    """
    text_dataset = load_preset_text_dataset(text_dataset_name, text_dataset_split_name)
    text_tokenizer0 = load_preset_text_tokenizer(text_tokenizer_name0)
    text_tokenizer1 = load_preset_text_tokenizer(text_tokenizer_name1)

    print(f"vocab_tokens_n({text_tokenizer_name0})={text_tokenizer0.vocab_tokens_n}")
    print(f"vocab_tokens_n({text_tokenizer_name1})={text_tokenizer1.vocab_tokens_n}")

    total_tokens0_n = 0
    total_tokens1_n = 0
    # Wrap the dataset so the bar actually advances each iteration; the original
    # bare tqdm() with no update() call never ticked.
    progressbar = tqdm(text_dataset)
    for text in progressbar:
        total_tokens0_n += len(text_tokenizer0.encode(text))
        total_tokens1_n += len(text_tokenizer1.encode(text))
        progressbar.set_postfix({
            f'total_tokens_n({text_tokenizer_name0})': total_tokens0_n,
            f'total_tokens_n({text_tokenizer_name1})': total_tokens1_n,
            # Guard against ZeroDivisionError when the first text yields no
            # tokens under tokenizer1.
            'tokens_ratio': total_tokens0_n / max(total_tokens1_n, 1)})
    progressbar.close()

    # Persist the result after the bar is gone; the postfix display is transient.
    print(f"total_tokens_n({text_tokenizer_name0})={total_tokens0_n}")
    print(f"total_tokens_n({text_tokenizer_name1})={total_tokens1_n}")
    if total_tokens1_n:
        print(f"tokens_ratio={total_tokens0_n / total_tokens1_n}")


if __name__ == '__main__':
    # Expose scan_tokens_ratio as a CLI: fire maps its keyword parameters
    # (dataset/split/tokenizer preset names) to command-line flags.
    fire.Fire(scan_tokens_ratio)
