import os
from typing import Callable

import torch
from transformers import AutoTokenizer, PreTrainedTokenizer
from zkl_aiutils_datasets import Dataset, load_dataset
from zkl_llmpt_iterator import HuggingFaceTokenizer, LlmptDataBatchTorch, LlmptDataset, SpecialsTextTokenizer, \
    TextTokenizer, Utf8Tokenizer, bos_name, eos_name, make_llmpt_dataset_torch, pad_name

# Path to the project root: two directory levels above this file's directory.
# NOTE(review): assumes this module lives two levels below the repository root
# — confirm if the file is ever moved.
project_dir_path = os.path.join(os.path.dirname(__file__), "../..")


def load_preset_llmpt_dataset(*,
    text_dataset_name: str,
    text_dataset_split_name: str | None = None,
    text_dataset_transform: Callable[[Dataset[str]], Dataset[str]] | None = None,
    text_tokenizer_name: str,
    batch_samples_n: int,
    chunk_tokens_n: int,
    device: torch.device | str | None = None,
) -> LlmptDataset[LlmptDataBatchTorch]:
    """Assemble a torch-backed LLM-pretraining dataset from named presets.

    Loads the preset text dataset (optionally one split), applies an optional
    transform, and combines it with the preset tokenizer plus its PAD and
    BOS/EOS wrapper factories via ``make_llmpt_dataset_torch``.
    """
    text_dataset = load_preset_text_dataset(text_dataset_name, text_dataset_split_name)
    if text_dataset_transform is not None:
        text_dataset = text_dataset_transform(text_dataset)

    # A precomputed token total only exists for known, untransformed preset
    # combinations; the lookup returns None otherwise.
    known_total_tokens_n = get_preset_llmpt_dataset_total_tokens_n(
        text_dataset_name, text_tokenizer_name, text_dataset_split_name, text_dataset_transform)

    preset_tokenizer = load_preset_text_tokenizer(text_tokenizer_name)
    add_pad = make_preset_text_tokenizer_add_pad(text_tokenizer_name)
    add_bos_eos = make_preset_text_tokenizer_add_bos_eos(text_tokenizer_name)

    return make_llmpt_dataset_torch(
        dataset=text_dataset,
        tokenizer=preset_tokenizer,
        tokenizer_add_pad=add_pad,
        tokenizer_add_bos_eos=add_bos_eos,
        batch_samples_n=batch_samples_n,
        chunk_tokens_n=chunk_tokens_n,
        total_tokens_n=known_total_tokens_n,
        device=device)


# presets

def load_preset_text_dataset(text_dataset_name: str, split_name: str | None = None):
    """Load a preset text dataset by name, optionally selecting a named split.

    Raises ValueError for an unknown dataset name.
    """
    if split_name is not None:
        # Load the full preset once, then pick the requested child split.
        full_dataset = load_preset_text_dataset(text_dataset_name)
        return full_dataset.named_children[split_name]

    # Preset name -> path relative to the project's datasets directory.
    preset_rel_paths = {
        'wiki': "text/wiki/v1.1",
        'mixture': "text/mixture/v6",
        'fineweb-edu-10BT': "text/fineweb-edu/10BT/v1",
    }
    try:
        rel_path = preset_rel_paths[text_dataset_name]
    except KeyError:
        raise ValueError(f"Unsupported dataset name: {text_dataset_name}") from None
    return load_dataset(os.path.join(project_dir_path, "datasets", rel_path))


def load_preset_text_tokenizer(text_tokenizer_name: str):
    """Instantiate the tokenizer preset identified by name.

    Raises ValueError for an unknown tokenizer name.
    """
    if text_tokenizer_name == 'utf8':
        # Raw byte-level tokenizer; no Hugging Face model involved.
        return Utf8Tokenizer()
    if text_tokenizer_name == 'qwen2.5':
        return HuggingFaceTokenizer(AutoTokenizer.from_pretrained('Qwen/Qwen2.5-0.5B'))
    if text_tokenizer_name == 'llama3':
        return HuggingFaceTokenizer(AutoTokenizer.from_pretrained('Xenova/llama3-tokenizer'))
    raise ValueError(f"Unsupported tokenizer name: {text_tokenizer_name}")


def make_preset_text_tokenizer_add_bos_eos(text_tokenizer_name: str):
    """Return a factory that wraps a tokenizer with BOS/EOS specials per preset.

    Raises ValueError for an unknown tokenizer name.
    """
    if text_tokenizer_name == 'utf8':
        def add_bos_eos(tokenizer: TextTokenizer):
            # Byte tokenizer has no native specials: append new BOS/EOS ids.
            return SpecialsTextTokenizer(tokenizer, added_specials=[bos_name, eos_name])
        return add_bos_eos
    if text_tokenizer_name == 'qwen2.5':
        def add_bos_eos(tokenizer: TextTokenizer):
            # Reuses the EOS id for both BOS and EOS — presumably qwen2.5
            # defines no dedicated BOS token; verify against the HF config.
            base = tokenizer_get_hugging_face_base(tokenizer)
            return SpecialsTextTokenizer(tokenizer,
                reused_specials={
                    bos_name: base.eos_token_id,
                    eos_name: base.eos_token_id})
        return add_bos_eos
    if text_tokenizer_name == 'llama3':
        def add_bos_eos(tokenizer: TextTokenizer):
            # Maps straight onto the HF tokenizer's own BOS/EOS ids.
            base = tokenizer_get_hugging_face_base(tokenizer)
            return SpecialsTextTokenizer(tokenizer,
                reused_specials={
                    bos_name: base.bos_token_id,
                    eos_name: base.eos_token_id})
        return add_bos_eos
    raise ValueError(f"Unsupported tokenizer name: {text_tokenizer_name}")


def make_preset_text_tokenizer_add_pad(text_tokenizer_name: str):
    """Return a factory that wraps a tokenizer with a PAD special per preset.

    Raises ValueError for an unknown tokenizer name.
    """
    if text_tokenizer_name == 'utf8':
        def add_pad(tokenizer: TextTokenizer):
            # Byte tokenizer has no native specials: append a new PAD id.
            return SpecialsTextTokenizer(tokenizer, added_specials=[pad_name])
        return add_pad
    if text_tokenizer_name == 'qwen2.5':
        def add_pad(tokenizer: TextTokenizer):
            # Reuses the HF tokenizer's own pad id for PAD.
            base = tokenizer_get_hugging_face_base(tokenizer)
            return SpecialsTextTokenizer(tokenizer,
                reused_specials={pad_name: base.pad_token_id})
        return add_pad
    if text_tokenizer_name == 'llama3':
        def add_pad(tokenizer: TextTokenizer):
            # Reuses EOS as PAD — presumably llama3 defines no dedicated pad
            # token; verify against the HF config.
            base = tokenizer_get_hugging_face_base(tokenizer)
            return SpecialsTextTokenizer(tokenizer,
                reused_specials={pad_name: base.eos_token_id})
        return add_pad
    raise ValueError(f"Unsupported tokenizer name: {text_tokenizer_name}")


def get_preset_llmpt_dataset_total_tokens_n(
    text_dataset_name: str,
    text_tokenizer_name: str,
    text_dataset_split_name: str | None,
    text_dataset_transform: Callable[[Dataset[str]], Dataset[str]] | None,
) -> int | None:
    """Look up the precomputed token total for a known preset combination.

    Returns None for unknown combinations, and whenever a transform is
    supplied (the cached counts were measured on the untransformed text).
    """
    if text_dataset_transform is not None:
        return None
    # (dataset, split, tokenizer) -> total token count.
    known_totals = {
        ('wiki', 'train', 'utf8'): int(19.7e9),
        ('wiki', 'train', 'qwen2.5'): int(3.69e9),
    }
    return known_totals.get(
        (text_dataset_name, text_dataset_split_name, text_tokenizer_name))


# utils

def tokenizer_get_hugging_face_base(tokenizer: TextTokenizer) -> PreTrainedTokenizer:
    """Peel off any SpecialsTextTokenizer wrappers and return the wrapped
    Hugging Face tokenizer's underlying PreTrainedTokenizer."""
    if isinstance(tokenizer, SpecialsTextTokenizer):
        # Wrappers may be nested; recurse until the innermost tokenizer.
        return tokenizer_get_hugging_face_base(tokenizer.base)
    assert isinstance(tokenizer, HuggingFaceTokenizer)
    return tokenizer.base
