import json
import os
import random
import re

from datasets import Audio, Dataset, DatasetDict, Features, Value


def convert_label_studio_to_hf_dataset(
    json_path,
    audio_base_dir,
    output_path,
    train_ratio=0.8,
    valid_ratio=0.1,
    max_valid_size=500,
    max_test_size=500,
    seed=None,
):
    """Convert a Label Studio audio-transcription export to a HF DatasetDict.

    Reads a Label Studio JSON export, extracts the audio path, transcription
    text, and language for each task, shuffles the rows, splits them into
    train/validation/test, and saves the result with ``save_to_disk``.

    Args:
        json_path: Path to the Label Studio JSON export file.
        audio_base_dir: Directory that the relative audio paths are joined to.
        output_path: Directory where the DatasetDict is saved.
        train_ratio: Fraction of rows assigned to the train split.
        valid_ratio: Fraction of rows assigned to the validation split
            (the remainder, 1 - train_ratio - valid_ratio, goes to test).
        max_valid_size: Hard cap on the validation split size.
        max_test_size: Hard cap on the test split size.
        seed: Optional shuffle seed for reproducible splits. ``None`` keeps
            the original (non-deterministic) behavior.

    Returns:
        The saved ``DatasetDict`` with "train", "validation", and "test" splits,
        each carrying "audio" (Audio feature), "sentence", and "language" columns.

    Raises:
        ValueError: If an audio URL does not contain the expected ``?d=`` marker.
    """
    # Load Label Studio export
    with open(json_path, "r", encoding="utf-8") as f:
        label_studio_data = json.load(f)

    # Extract one row (audio path, transcription, language) per task
    dataset_rows = []
    for item in label_studio_data:
        # "/data/local-files/?d=split_audio/clip-001/chunk_8.wav" -> "split_audio/clip-001/chunk_8.wav"
        audio_url = item["data"]["audio"]
        match = re.search(r"\?d=(.+)", audio_url)
        if match is None:
            # Fail loudly with context instead of an opaque AttributeError
            raise ValueError(f"Unexpected audio path format (missing '?d='): {audio_url!r}")
        full_audio_path = os.path.join(audio_base_dir, match.group(1))

        text = ""
        # Fallback to data.language when the annotations don't provide one
        language = item["data"].get("language", "")

        # Pull transcription/language from the first annotation, if present
        annotations = item.get("annotations") or []
        if annotations and "result" in annotations[0]:
            for result in annotations[0]["result"]:
                if result.get("to_name") != "audio":
                    continue
                value_text = result.get("value", {}).get("text") or []
                if not value_text:
                    # Guard against annotations with an empty text list
                    continue
                if result.get("from_name") == "transcription":
                    text = value_text[0]
                elif result.get("from_name") == "language":
                    language = value_text[0]

        dataset_rows.append(
            {
                "audio": full_audio_path,
                "sentence": text,
                "language": language,
            }
        )

    # Shuffle before splitting; a seed makes the splits reproducible
    rng = random.Random(seed) if seed is not None else random
    rng.shuffle(dataset_rows)

    # Calculate split sizes with maximum caps
    total = len(dataset_rows)
    valid_size = min(int(total * valid_ratio), max_valid_size)
    test_size = min(int(total * (1 - train_ratio - valid_ratio)), max_test_size)
    # The rest goes to training
    train_size = total - valid_size - test_size

    # Split the data
    train_rows = dataset_rows[:train_size]
    valid_rows = dataset_rows[train_size : train_size + valid_size]
    test_rows = dataset_rows[
        train_size + valid_size : train_size + valid_size + test_size
    ]

    features = Features(
        {
            "audio": Value("string"),
            "sentence": Value("string"),
            "language": Value("string"),
        }
    )

    # Fixed column list so empty splits still produce valid (empty) datasets;
    # indexing rows[0] would raise IndexError when a split has no rows.
    columns = ("audio", "sentence", "language")

    def _rows_to_dataset(rows):
        # Transpose row dicts into a columnar dict and attach the Audio feature.
        columnar = {col: [row[col] for row in rows] for col in columns}
        split = Dataset.from_dict(columnar, features=features)
        return split.cast_column("audio", Audio())

    dataset_dict = DatasetDict(
        {
            "train": _rows_to_dataset(train_rows),
            "validation": _rows_to_dataset(valid_rows),
            "test": _rows_to_dataset(test_rows),
        }
    )

    # Save dataset
    dataset_dict.save_to_disk(output_path)

    print(f"Train split: {len(train_rows)} examples")
    print(f"Validation split: {len(valid_rows)} examples")
    print(f"Test split: {len(test_rows)} examples")

    return dataset_dict