import os
from dataclasses import dataclass
from typing import Any, Dict, List, Union

import torch
from datasets import Audio, DatasetDict, load_from_disk
from transformers import (
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    WhisperFeatureExtractor,
    WhisperForConditionalGeneration,
    WhisperProcessor,
    WhisperTokenizer,
)

# Global tokenizer cache to avoid repeated initialization of per-language
# WhisperTokenizer instances. Keys have the form
# "{model_name_or_path}_{language}_{task}" (see get_cached_tokenizer).
_tokenizer_cache = {}
_cache_max_size = 20  # Maximum number of tokenizers to cache


def get_cached_tokenizer(
    model_name_or_path: str, language: str, task: str = "transcribe"
):
    """
    Get a cached tokenizer for the specified language, or create and cache it if not exists.

    The cache is bounded (``_cache_max_size``); when full, the oldest half of
    the entries is evicted. Cache hits move the entry to the back of the
    (insertion-ordered) dict so eviction is true LRU — previously hits were
    never refreshed, making the eviction FIFO despite the "most recently
    used" intent.

    Args:
        model_name_or_path: The model name or path
        language: The language code
        task: The task type (default: "transcribe")

    Returns:
        Cached WhisperTokenizer instance
    """
    cache_key = f"{model_name_or_path}_{language}_{task}"

    if cache_key in _tokenizer_cache:
        # Re-insert on hit so dict order reflects recency of *use*,
        # making the half-eviction below drop the least recently used keys.
        _tokenizer_cache[cache_key] = _tokenizer_cache.pop(cache_key)
        return _tokenizer_cache[cache_key]

    # Check if cache is full before adding a new entry
    if len(_tokenizer_cache) >= _cache_max_size:
        print(
            f"Tokenizer cache is full ({_cache_max_size}). Clearing oldest entries..."
        )
        # Clear half of the cache (keep most recently used)
        keys_to_remove = list(_tokenizer_cache.keys())[: _cache_max_size // 2]
        for key in keys_to_remove:
            del _tokenizer_cache[key]

    print(f"Creating and caching tokenizer for language: {language}")
    _tokenizer_cache[cache_key] = WhisperTokenizer.from_pretrained(
        model_name_or_path, language=language, task=task
    )
    print(f"Tokenizer cache size: {len(_tokenizer_cache)}")

    return _tokenizer_cache[cache_key]


def clear_tokenizer_cache():
    """Drop every cached tokenizer and return how many entries were removed."""
    global _tokenizer_cache
    removed = len(_tokenizer_cache)
    _tokenizer_cache.clear()
    return removed


def get_cache_info():
    """Return a snapshot of the tokenizer cache: current size, capacity, and keys."""
    return dict(
        size=len(_tokenizer_cache),
        max_size=_cache_max_size,
        keys=list(_tokenizer_cache),
    )


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    """
    Collate a list of feature dicts into a padded training batch.

    Audio input features and token labels require different padding schemes,
    so they are padded independently: the feature extractor pads the log-Mel
    inputs, the tokenizer pads the label ids. Label padding positions are set
    to -100 so the loss ignores them, and a leading decoder-start token is
    stripped because the model re-appends it during training.
    """

    processor: Any
    decoder_start_token_id: int

    def __call__(
        self, features: List[Dict[str, Union[List[int], torch.Tensor]]]
    ) -> Dict[str, torch.Tensor]:
        # Pad the log-Mel audio inputs into a tensor batch.
        audio_inputs = [{"input_features": item["input_features"]} for item in features]
        batch = self.processor.feature_extractor.pad(
            audio_inputs, return_tensors="pt"
        )

        # Pad the tokenized label sequences separately with the tokenizer.
        label_inputs = [{"input_ids": item["labels"]} for item in features]
        padded_labels = self.processor.tokenizer.pad(label_inputs, return_tensors="pt")

        # Positions where the attention mask is 0 are padding; mark them -100
        # so the loss function skips them.
        pad_positions = padded_labels.attention_mask.ne(1)
        labels = padded_labels["input_ids"].masked_fill(pad_positions, -100)

        # If every sequence begins with the decoder-start token (appended by a
        # prior tokenization step), drop it — the model prepends it anyway.
        all_start_with_bos = (
            (labels[:, 0] == self.decoder_start_token_id).all().cpu().item()
        )
        if all_start_with_bos:
            labels = labels[:, 1:]

        batch["labels"] = labels
        return batch


def prepare_dataset(batch, processor, languages=None, model_name_or_path=None):
    """
    Convert one raw example into model inputs: log-Mel features plus label ids.

    For multilingual training (more than one language configured and a model
    path given), a per-language cached tokenizer is used when the example
    carries a known "language" field; otherwise the processor's default
    tokenizer handles the transcript.
    """
    audio = batch["audio"]

    # Log-Mel spectrogram features for the audio clip (at its given sampling rate).
    batch["input_features"] = processor.feature_extractor(
        audio["array"], sampling_rate=audio["sampling_rate"]
    ).input_features[0]

    # Pick the tokenizer for the transcript.
    multilingual = languages and len(languages) > 1 and model_name_or_path
    sample_language = batch.get("language", None)

    if multilingual and sample_language and sample_language in languages:
        # Language-specific tokenizer (cached) so language tokens match the sample.
        chosen_tokenizer = get_cached_tokenizer(
            model_name_or_path, sample_language, "transcribe"
        )
    else:
        # Single language, auto-detection, or unknown sample language: default tokenizer.
        chosen_tokenizer = processor.tokenizer

    batch["labels"] = chosen_tokenizer(batch["sentence"]).input_ids
    return batch


def detect_languages_in_dataset(dataset):
    """
    Detect all languages present in the dataset.

    Args:
        dataset: HuggingFace DatasetDict

    Returns:
        Sorted list of unique languages found across all splits that carry a
        "language" column (splits without one are skipped).
    """
    found = set()

    for split_name, split in dataset.items():
        # Only splits that actually expose a "language" column contribute.
        if "language" not in split.column_names:
            continue
        split_languages = set(split["language"])
        found |= split_languages
        print(
            f"Found languages in {split_name} split: {sorted(split_languages)}"
        )

    detected_languages = sorted(found)
    print(f"Total unique languages detected: {detected_languages}")

    return detected_languages


def filter_by_languages(dataset, languages):
    """
    Restrict the dataset to examples whose "language" value is in *languages*.

    A falsy language list (None or empty) disables filtering and returns the
    dataset unchanged.
    """
    if not languages:
        return dataset

    wanted = set(languages)
    return dataset.filter(lambda example: example["language"] in wanted)


def limit_samples_per_language(dataset, max_samples_per_language, languages):
    """
    Cap the dataset at *max_samples_per_language* examples for each language.

    Returns the dataset unchanged when no cap or no language list is given.
    The per-language subsets are re-concatenated in the order of *languages*.
    """
    if not max_samples_per_language or not languages:
        return dataset

    from datasets import concatenate_datasets

    per_language = []
    for language in languages:
        subset = dataset.filter(lambda example: example["language"] == language)
        # Truncate only when the subset exceeds the cap.
        if len(subset) > max_samples_per_language:
            subset = subset.select(range(max_samples_per_language))
        per_language.append(subset)

    return concatenate_datasets(per_language)


def compute_metrics(pred, tokenizer):
    """
    Compute the word error rate (WER, in percent) for a prediction batch.

    NOTE: this mutates ``pred.label_ids`` in place (replacing -100 markers
    with the pad token id) so the sequences can be decoded.
    """
    from evaluate import load

    wer_metric = load("wer")
    predictions = pred.predictions
    references = pred.label_ids

    # -100 marks positions the loss ignored; restore the pad token so
    # batch_decode can handle them.
    references[references == -100] = tokenizer.pad_token_id

    # Decode without grouping tokens so WER is computed on plain text.
    decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
    decoded_refs = tokenizer.batch_decode(references, skip_special_tokens=True)

    score = 100 * wer_metric.compute(
        predictions=decoded_preds, references=decoded_refs
    )
    return {"wer": score}


def train_whisper(
    dataset_path: str,
    model_name_or_path: str = "openai/whisper-large-v3",
    output_dir: str = "./whisper-finetuned",
    languages: List[str] = None,
    max_samples_per_language: int = None,
    train_batch_size: int = 16,
    eval_batch_size: int = 8,
    gradient_accumulation_steps: int = 1,
    learning_rate: float = 1e-5,
    warmup_steps: int = 500,
    num_train_epochs: int = 3,
    max_steps: int = -1,
    gradient_checkpointing: bool = True,
    fp16: bool = True,
    eval_strategy: str = "steps",
    per_device_eval_batch_size: int = 8,
    predict_with_generate: bool = True,
    generation_max_length: int = 225,
    save_steps: int = 500,
    eval_steps: int = 500,
    logging_steps: int = 25,
    load_best_model_at_end: bool = True,
    metric_for_best_model: str = "wer",
    greater_is_better: bool = False,
    push_to_hub: bool = False,
    report_to: List[str] = None,
):
    """
    Fine-tune Whisper model on custom dataset

    Args:
        dataset_path: Path to the HuggingFace dataset
        model_name_or_path: Pretrained model name or path
        output_dir: Directory to save the fine-tuned model
        languages: List of languages to include in training
        max_samples_per_language: Maximum samples per language
        train_batch_size: Training batch size (per device)
        eval_batch_size: Evaluation batch size (NOTE: currently accepted but
            unused — evaluation batch size is controlled by
            per_device_eval_batch_size below)
        gradient_accumulation_steps: Number of gradient accumulation steps
        learning_rate: Learning rate
        warmup_steps: Number of warmup steps
        num_train_epochs: Number of training epochs
        max_steps: Maximum training steps (-1 for no limit; a positive value
            overrides num_train_epochs)
        gradient_checkpointing: Whether to use gradient checkpointing
        fp16: Whether to use mixed precision training
        eval_strategy: Evaluation strategy
        per_device_eval_batch_size: Evaluation batch size per device
        predict_with_generate: Whether to use generate for predictions
        generation_max_length: Maximum generation length
        save_steps: Save checkpoint every N steps
        eval_steps: Evaluate every N steps
        logging_steps: Log every N steps
        load_best_model_at_end: Whether to load best model at end
        metric_for_best_model: Metric to use for best model selection
        greater_is_better: Whether higher metric value is better
        push_to_hub: Whether to push to HuggingFace Hub
        report_to: List of integrations to report results to

    Returns:
        Tuple of (trainer, test_results) where test_results is the dict
        returned by trainer.evaluate on the "test" split.

    Note:
        Assumes the dataset on disk is a DatasetDict with "train",
        "validation" and "test" splits.
    """
    if report_to is None:
        report_to = ["tensorboard"]

    # Load dataset
    print(f"Loading dataset from {dataset_path}")
    dataset = load_from_disk(dataset_path)

    # Auto-detect languages if not specified
    if languages is None:
        print("No languages specified, auto-detecting languages from dataset...")
        languages = detect_languages_in_dataset(dataset)
        if not languages:
            print(
                "Warning: No language information found in dataset. Using auto-detection mode."
            )
        else:
            print(f"Will train on all detected languages: {languages}")
    else:
        print(f"Training on specified languages: {languages}")

    # Filter by languages if specified
    if languages:
        print(f"Filtering dataset for languages: {languages}")
        dataset = DatasetDict(
            {
                split: filter_by_languages(dataset[split], languages)
                for split in dataset.keys()
            }
        )

    # Limit samples per language if specified
    if max_samples_per_language and languages:
        print(f"Limiting to {max_samples_per_language} samples per language")
        dataset = DatasetDict(
            {
                split: limit_samples_per_language(
                    dataset[split], max_samples_per_language, languages
                )
                for split in dataset.keys()
            }
        )

    # NOTE(review): hard-codes the split names; raises KeyError if any of
    # "train"/"validation"/"test" is missing — confirm upstream dataset layout.
    print(
        f"Dataset sizes - Train: {len(dataset['train'])}, Validation: {len(dataset['validation'])}, Test: {len(dataset['test'])}"
    )

    # Load processor and model
    print(f"Loading model and processor from {model_name_or_path}")
    feature_extractor = WhisperFeatureExtractor.from_pretrained(model_name_or_path)

    # For multilingual training, don't specify language and task to allow flexibility
    if languages and len(languages) > 1:
        # Multilingual training - don't specify language
        tokenizer = WhisperTokenizer.from_pretrained(
            model_name_or_path, task="transcribe"
        )
        processor = WhisperProcessor.from_pretrained(
            model_name_or_path, task="transcribe"
        )
        print(f"Configured for multilingual training with languages: {languages}")

        # Pre-warm the tokenizer cache for all specified languages so the
        # dataset .map() below doesn't pay the from_pretrained cost per worker hit.
        print("Pre-warming tokenizer cache for all languages...")
        for lang in languages:
            get_cached_tokenizer(model_name_or_path, lang, "transcribe")
        print(f"Tokenizer cache warmed up for {len(languages)} languages")

    elif languages and len(languages) == 1:
        # Single language training
        language = languages[0]
        tokenizer = WhisperTokenizer.from_pretrained(
            model_name_or_path, language=language, task="transcribe"
        )
        processor = WhisperProcessor.from_pretrained(
            model_name_or_path, language=language, task="transcribe"
        )
        print(f"Configured for single language training: {language}")
    else:
        # No language specified - use auto-detection
        tokenizer = WhisperTokenizer.from_pretrained(
            model_name_or_path, task="transcribe"
        )
        processor = WhisperProcessor.from_pretrained(
            model_name_or_path, task="transcribe"
        )
        print("Configured for auto-language detection")

    # Load model
    model = WhisperForConditionalGeneration.from_pretrained(model_name_or_path)

    # Set generation config based on training setup
    if languages and len(languages) == 1:
        # Single language training
        model.generation_config.language = languages[0]
        model.generation_config.task = "transcribe"
        print(f"Set generation language to: {languages[0]}")
    else:
        # Multilingual or auto-detection - don't force language; also remove
        # any instance-level language attribute so generate() auto-detects
        # (hasattr guard: the attribute may not be set on this config).
        model.generation_config.task = "transcribe"
        model.generation_config.forced_decoder_ids = None
        if hasattr(model.generation_config, "language"):
            delattr(model.generation_config, "language")
        print("Configured for multilingual generation (no forced language)")

    # Prepare dataset: resample audio to Whisper's expected 16 kHz, then map
    # each example to (input_features, labels). remove_columns drops all the
    # raw columns (audio, sentence, language, ...) after mapping.
    print("Preparing dataset...")
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
    dataset = dataset.map(
        lambda x: prepare_dataset(x, processor, languages, model_name_or_path),
        remove_columns=dataset.column_names["train"],
    )

    # Data collator
    data_collator = DataCollatorSpeechSeq2SeqWithPadding(
        processor=processor,
        decoder_start_token_id=model.config.decoder_start_token_id,
    )

    # Training arguments.
    # NOTE(review): the 'eval_strategy' kwarg name requires a recent
    # transformers release (older versions call it 'evaluation_strategy') —
    # confirm the pinned version. train_batch_size maps to the per-device
    # train batch size; the eval_batch_size parameter is not forwarded here.
    training_args = Seq2SeqTrainingArguments(
        output_dir=output_dir,
        per_device_train_batch_size=train_batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        learning_rate=learning_rate,
        warmup_steps=warmup_steps,
        max_steps=max_steps,
        num_train_epochs=num_train_epochs,
        gradient_checkpointing=gradient_checkpointing,
        fp16=fp16,
        eval_strategy=eval_strategy,
        per_device_eval_batch_size=per_device_eval_batch_size,
        predict_with_generate=predict_with_generate,
        generation_max_length=generation_max_length,
        save_steps=save_steps,
        eval_steps=eval_steps,
        logging_steps=logging_steps,
        report_to=report_to,
        load_best_model_at_end=load_best_model_at_end,
        metric_for_best_model=metric_for_best_model,
        greater_is_better=greater_is_better,
        push_to_hub=push_to_hub,
        dataloader_num_workers=4,
    )

    # Initialize trainer.
    # NOTE(review): passing the feature extractor as 'tokenizer' makes the
    # Trainer save it with checkpoints; newer transformers prefers the
    # 'processing_class' argument — confirm against the pinned version.
    trainer = Seq2SeqTrainer(
        args=training_args,
        model=model,
        train_dataset=dataset["train"],
        eval_dataset=dataset["validation"],
        data_collator=data_collator,
        compute_metrics=lambda pred: compute_metrics(pred, tokenizer),
        tokenizer=processor.feature_extractor,
    )

    # Train
    print("Starting training...")
    trainer.train()

    # Save model and the full processor (feature extractor + tokenizer)
    print(f"Saving model to {output_dir}")
    trainer.save_model()
    processor.save_pretrained(output_dir)

    # Evaluate on test set
    print("Evaluating on test set...")
    test_results = trainer.evaluate(dataset["test"])
    print(f"Test WER: {test_results['eval_wer']:.2f}%")

    # Clear tokenizer cache to free memory (only populated in the
    # multilingual branch above)
    if languages and len(languages) > 1:
        print("Clearing tokenizer cache...")
        cleared_count = clear_tokenizer_cache()
        print(f"Tokenizer cache cleared ({cleared_count} tokenizers freed)")

    return trainer, test_results