from functools import cache

from fsspec.implementations.dirfs import DirFileSystem
from huggingface_hub import HfFileSystem
from transformers import AutoTokenizer, PreTrainedTokenizer
from zkl_aiutils_datasets import ChainedIndexedNamedDatasets, MappedDataset, load_and_split_parquet_files
from zkl_llmpt_iterator import HuggingFaceTokenizer, SpecialsTextTokenizer, TextTokenizer, Utf8Tokenizer, bos_name, \
    eos_name, pad_name


@cache
def load_preset_text_dataset(text_dataset_name: str, split_name: str | None = None):
    if split_name is not None:
        dataset = load_preset_text_dataset(text_dataset_name)
        return dataset.named_children[split_name]

    preset_text_datasets = {}

    def preset_text_datasets_loader(name: str):
        def preset_text_datasets_loader_register(loader):
            preset_text_datasets[name] = loader

        return preset_text_datasets_loader_register

    @preset_text_datasets_loader('wikien')
    def wikien_loader():
        parquet_files_dir_path = "datasets/wikimedia/wikipedia/20231101.en"
        fs = DirFileSystem(parquet_files_dir_path, HfFileSystem())
        splits_dataset = load_and_split_parquet_files(fs, {"train": 0.8, "valid": 0.1, "test": 0.1})
        splits_dataset = ChainedIndexedNamedDatasets({
            split_name: MappedDataset(split_dataset, lambda sample: sample['title'] + "\n\n" + sample['text'])
            for split_name, split_dataset in splits_dataset.named_children.items()})
        return splits_dataset

    @preset_text_datasets_loader('fw10bt')
    def fw10bt_loader():
        parquet_files_dir_path = "datasets/HuggingFaceFW/fineweb/sample/10BT"
        fs = DirFileSystem(parquet_files_dir_path, HfFileSystem())
        splits_dataset = load_and_split_parquet_files(fs, {"train": 0.8, "valid": 0.1, "test": 0.1})
        splits_dataset = ChainedIndexedNamedDatasets({
            split_name: MappedDataset(split_dataset, lambda sample: sample['text'])
            for split_name, split_dataset in splits_dataset.named_children.items()})
        return splits_dataset

    @preset_text_datasets_loader('fwe10bt')
    def fwe10bt_loader():
        parquet_files_dir_path = "datasets/HuggingFaceFW/fineweb-edu/sample/10BT"
        fs = DirFileSystem(parquet_files_dir_path, HfFileSystem())
        splits_dataset = load_and_split_parquet_files(fs, {"train": 0.8, "valid": 0.1, "test": 0.1})
        splits_dataset = ChainedIndexedNamedDatasets({
            split_name: MappedDataset(split_dataset, lambda sample: sample['text'])
            for split_name, split_dataset in splits_dataset.named_children.items()})
        return splits_dataset

    return preset_text_datasets[text_dataset_name]()


@cache
def load_preset_text_tokenizer(text_tokenizer_name: str):
    """Build (and cache) the text tokenizer for a preset name.

    Args:
        text_tokenizer_name: One of ``'utf8'``, ``'qwen2.5'``, ``'llama3'``.

    Returns:
        A ``Utf8Tokenizer`` for ``'utf8'``, otherwise a ``HuggingFaceTokenizer``
        wrapping the corresponding pretrained Hub tokenizer.

    Raises:
        ValueError: If the name is not a supported preset.
    """
    if text_tokenizer_name == 'utf8':
        return Utf8Tokenizer()
    if text_tokenizer_name == 'qwen2.5':
        pretrained = AutoTokenizer.from_pretrained('Qwen/Qwen2.5-0.5B')
        return HuggingFaceTokenizer(pretrained)
    if text_tokenizer_name == 'llama3':
        pretrained = AutoTokenizer.from_pretrained('Xenova/llama3-tokenizer')
        return HuggingFaceTokenizer(pretrained)
    raise ValueError(f"Unsupported tokenizer name: {text_tokenizer_name}")


@cache
def make_preset_text_tokenizer_add_bos(text_tokenizer_name: str):
    """Return a function that wraps a tokenizer so it exposes a BOS special.

    For ``'utf8'`` a brand-new BOS token is added; for the Hugging Face presets
    an existing special token id of the underlying pretrained tokenizer is
    reused instead of growing the vocabulary.

    Raises:
        ValueError: If the name is not a supported preset.
    """
    if text_tokenizer_name == 'utf8':
        def add_bos(base_tokenizer: TextTokenizer):
            return SpecialsTextTokenizer(base_tokenizer, added_specials=[bos_name])
        return add_bos
    if text_tokenizer_name == 'qwen2.5':
        def add_bos(base_tokenizer: TextTokenizer):
            hf_tokenizer = tokenizer_get_hugging_face_base(base_tokenizer)
            # NOTE(review): reuses the EOS id as BOS — presumably because the
            # Qwen2.5 tokenizer defines no BOS token; confirm.
            return SpecialsTextTokenizer(base_tokenizer,
                reused_specials={bos_name: hf_tokenizer.eos_token_id})
        return add_bos
    if text_tokenizer_name == 'llama3':
        def add_bos(base_tokenizer: TextTokenizer):
            hf_tokenizer = tokenizer_get_hugging_face_base(base_tokenizer)
            return SpecialsTextTokenizer(base_tokenizer,
                reused_specials={bos_name: hf_tokenizer.bos_token_id})
        return add_bos
    raise ValueError(f"Unsupported tokenizer name: {text_tokenizer_name}")


@cache
def make_preset_text_tokenizer_add_eos(text_tokenizer_name: str):
    """Return a function that wraps a tokenizer so it exposes an EOS special.

    For ``'utf8'`` a brand-new EOS token is added; for the Hugging Face presets
    the underlying pretrained tokenizer's own EOS id is reused instead of
    growing the vocabulary.

    Raises:
        ValueError: If the name is not a supported preset.
    """
    if text_tokenizer_name == 'utf8':
        def add_eos(base_tokenizer: TextTokenizer):
            return SpecialsTextTokenizer(base_tokenizer, added_specials=[eos_name])
        return add_eos
    if text_tokenizer_name == 'qwen2.5':
        def add_eos(base_tokenizer: TextTokenizer):
            hf_tokenizer = tokenizer_get_hugging_face_base(base_tokenizer)
            return SpecialsTextTokenizer(base_tokenizer,
                reused_specials={eos_name: hf_tokenizer.eos_token_id})
        return add_eos
    if text_tokenizer_name == 'llama3':
        def add_eos(base_tokenizer: TextTokenizer):
            hf_tokenizer = tokenizer_get_hugging_face_base(base_tokenizer)
            return SpecialsTextTokenizer(base_tokenizer,
                reused_specials={eos_name: hf_tokenizer.eos_token_id})
        return add_eos
    raise ValueError(f"Unsupported tokenizer name: {text_tokenizer_name}")


@cache
def make_preset_text_tokenizer_add_pad(text_tokenizer_name: str):
    """Return a function that wraps a tokenizer so it exposes a PAD special.

    For ``'utf8'`` a brand-new PAD token is added; for the Hugging Face presets
    an existing special token id of the underlying pretrained tokenizer is
    reused instead of growing the vocabulary.

    Raises:
        ValueError: If the name is not a supported preset.
    """
    if text_tokenizer_name == 'utf8':
        def add_pad(base_tokenizer: TextTokenizer):
            return SpecialsTextTokenizer(base_tokenizer, added_specials=[pad_name])
        return add_pad
    if text_tokenizer_name == 'qwen2.5':
        def add_pad(base_tokenizer: TextTokenizer):
            hf_tokenizer = tokenizer_get_hugging_face_base(base_tokenizer)
            return SpecialsTextTokenizer(base_tokenizer,
                reused_specials={pad_name: hf_tokenizer.pad_token_id})
        return add_pad
    if text_tokenizer_name == 'llama3':
        def add_pad(base_tokenizer: TextTokenizer):
            hf_tokenizer = tokenizer_get_hugging_face_base(base_tokenizer)
            # NOTE(review): reuses the EOS id as PAD — presumably because the
            # llama3 tokenizer defines no PAD token; confirm.
            return SpecialsTextTokenizer(base_tokenizer,
                reused_specials={pad_name: hf_tokenizer.eos_token_id})
        return add_pad
    raise ValueError(f"Unsupported tokenizer name: {text_tokenizer_name}")


# utils

def tokenizer_get_hugging_face_base(tokenizer: TextTokenizer) -> PreTrainedTokenizer:
    """Unwrap any nesting of SpecialsTextTokenizer wrappers and return the
    underlying Hugging Face ``PreTrainedTokenizer``.

    The innermost tokenizer must be a ``HuggingFaceTokenizer`` (asserted).
    """
    if isinstance(tokenizer, SpecialsTextTokenizer):
        # Peel one wrapper layer and keep unwrapping recursively.
        return tokenizer_get_hugging_face_base(tokenizer.base)
    assert isinstance(tokenizer, HuggingFaceTokenizer)
    return tokenizer.base
