import os

import click
from whisper_finetuning.prepare_label_studio import (
    export_label_studio_config,
    prepare_label_studio_data,
)
from whisper_finetuning.split_audio import split_audio
from whisper_finetuning.transcript import process_audio_files


@click.group()
def cli():
    """Command line interface for tari-whisper-fine-tuning"""
    # Root command group; it holds no shared options or state.
    # Subcommands below attach themselves via @cli.command().
    pass


@cli.command()
@click.argument("input_path", type=click.Path(exists=True))
@click.argument("output_directory", type=click.Path())
@click.option(
    "--max-chunk-duration",
    default=10_000,
    help="Maximum chunk duration in milliseconds",
)
@click.option(
    "--min-silence-duration",
    default=900,
    help="Minimum silence duration in milliseconds",
)
def split_audio_cmd(
    input_path, output_directory, max_chunk_duration, min_silence_duration
):
    """Split audio file or directory into chunks based on silence detection"""
    # Dispatch on the kind of path given: a directory is processed as a
    # batch, a single file individually. The two cases are mutually
    # exclusive, so guard clauses with early returns keep the flow flat.
    if os.path.isdir(input_path):
        # Imported lazily, next to its only use in this module.
        from whisper_finetuning.split_audio import split_audio_directory

        split_audio_directory(
            input_path, output_directory, max_chunk_duration, min_silence_duration
        )
        return
    if os.path.isfile(input_path):
        split_audio(
            input_path, output_directory, max_chunk_duration, min_silence_duration
        )
        return
    # click.Path(exists=True) normally guarantees one of the above, but
    # unusual filesystem entries (e.g. broken symlinks) can still land here.
    raise click.BadParameter("Input path must be a file or directory")


@cli.command()
@click.argument("input_dir", type=click.Path(exists=True))
@click.argument("output_dir", type=click.Path())
@click.option(
    "--model-size",
    default="large-v3",
    help="Whisper model size (e.g., tiny, base, small, medium, large-v3)",
)
@click.option(
    "--device",
    default="cuda",
    type=click.Choice(["cuda", "cpu"]),
    help="Device to run on (cuda or cpu)",
)
@click.option(
    "--compute-type",
    default="float16",
    type=click.Choice(["float16", "int8_float16", "int8"]),
    help="Compute precision type",
)
@click.option(
    "--language",
    default=None,
    help="Audio language (if not specified, will auto-detect)",
)
def transcribe(input_dir, output_dir, model_size, device, compute_type, language):
    """Transcribe audio files using Whisper"""
    # Thin wrapper: collect the tuning knobs and forward them unchanged
    # to the transcription pipeline.
    whisper_options = {
        "model_size": model_size,
        "device": device,
        "compute_type": compute_type,
        "language": language,
    }
    process_audio_files(input_dir, output_dir, **whisper_options)


@cli.command()
@click.argument("audio_dir", type=click.Path(exists=True))
@click.argument("transcription_dir", type=click.Path(exists=True))
@click.argument("output_file", type=click.Path())
@click.option(
    "--audio-url-prefix",
    default="/data/local-files/?d=",
    help="Prefix for audio URLs (e.g., 'http://localhost:8000/')",
)
@click.option(
    "--export-config",
    is_flag=True,
    help="Export recommended Label Studio configuration",
)
def prepare_for_label_studio(
    audio_dir, transcription_dir, output_file, audio_url_prefix, export_config
):
    """Prepare pre-annotated data for Label Studio"""
    prepare_label_studio_data(
        audio_dir, transcription_dir, output_file, audio_url_prefix
    )

    if not export_config:
        return

    # The labeling config XML is written alongside the JSON export.
    target_dir = os.path.dirname(output_file)
    config_path = os.path.join(target_dir, "label_studio_config.xml")
    config_xml = export_label_studio_config()
    with open(config_path, "w", encoding="utf-8") as fh:
        fh.write(config_xml)
    print(f"Exported Label Studio configuration to {config_path}")


@cli.command()
@click.argument("json_path", type=click.Path(exists=True))
@click.argument("audio_base_dir", type=click.Path(exists=True))
@click.argument("output_path", type=click.Path())
@click.option(
    "--train-ratio",
    default=0.8,
    type=float,
    help="Ratio of data to use for training",
)
@click.option(
    "--valid-ratio",
    default=0.1,
    type=float,
    help="Ratio of data to use for validation",
)
@click.option(
    "--max-valid-size",
    default=500,
    type=int,
    help="Maximum number of examples in validation set",
)
@click.option(
    "--max-test-size",
    default=500,
    type=int,
    help="Maximum number of examples in test set",
)
def convert_to_hf(
    json_path,
    audio_base_dir,
    output_path,
    train_ratio,
    valid_ratio,
    max_valid_size,
    max_test_size,
):
    """Convert Label Studio export to HuggingFace dataset format

    Reads the Label Studio JSON export at JSON_PATH, resolves audio files
    relative to AUDIO_BASE_DIR, splits the data into train/valid/test, and
    writes the resulting dataset to OUTPUT_PATH.
    """
    # Imported lazily so the heavy dataset dependencies are only loaded
    # when this subcommand actually runs.
    from whisper_finetuning.convert_to_hf_dataset import (
        convert_label_studio_to_hf_dataset,
    )

    # The converter persists the dataset to output_path itself; its return
    # value was previously bound to an unused local, so just call it.
    convert_label_studio_to_hf_dataset(
        json_path,
        audio_base_dir,
        output_path,
        train_ratio,
        valid_ratio,
        max_valid_size,
        max_test_size,
    )
    print(f"Dataset saved to {output_path}")


@cli.command()
@click.argument("dataset_path", type=click.Path(exists=True))
@click.option(
    "--model-name-or-path",
    default="openai/whisper-large-v3",
    help="Pretrained model name or path",
)
@click.option(
    "--languages",
    multiple=True,
    help="Languages to include in training (can specify multiple). If not specified, will auto-detect all languages from dataset.",
)
@click.option(
    "--output-dir",
    default="./whisper-finetuned",
    help="Directory to save the fine-tuned model",
)
@click.option(
    "--max-samples-per-language",
    type=int,
    help="Maximum number of samples per language",
)
@click.option(
    "--train-batch-size",
    default=16,
    type=int,
    help="Training batch size",
)
@click.option(
    "--eval-batch-size",
    default=8,
    type=int,
    help="Evaluation batch size",
)
@click.option(
    "--gradient-accumulation-steps",
    default=1,
    type=int,
    help="Number of gradient accumulation steps",
)
@click.option(
    "--learning-rate",
    default=1e-5,
    type=float,
    help="Learning rate",
)
@click.option(
    "--warmup-steps",
    default=500,
    type=int,
    help="Number of warmup steps",
)
@click.option(
    "--num-train-epochs",
    default=3,
    type=int,
    help="Number of training epochs",
)
@click.option(
    "--max-steps",
    default=-1,
    type=int,
    help="Maximum training steps (-1 for no limit)",
)
@click.option(
    "--gradient-checkpointing/--no-gradient-checkpointing",
    default=True,
    help="Whether to use gradient checkpointing",
)
@click.option(
    "--fp16/--no-fp16",
    default=True,
    help="Whether to use mixed precision training",
)
@click.option(
    "--save-steps",
    default=500,
    type=int,
    help="Save checkpoint every N steps",
)
@click.option(
    "--eval-steps",
    default=500,
    type=int,
    help="Evaluate every N steps",
)
@click.option(
    "--logging-steps",
    default=25,
    type=int,
    help="Log every N steps",
)
@click.option(
    "--push-to-hub",
    is_flag=True,
    help="Whether to push to HuggingFace Hub",
)
def train(
    dataset_path,
    model_name_or_path,
    languages,
    output_dir,
    max_samples_per_language,
    train_batch_size,
    eval_batch_size,
    gradient_accumulation_steps,
    learning_rate,
    warmup_steps,
    num_train_epochs,
    max_steps,
    gradient_checkpointing,
    fp16,
    save_steps,
    eval_steps,
    logging_steps,
    push_to_hub,
):
    """Fine-tune Whisper model on custom dataset.

    If no languages are specified, will automatically detect and train on all languages found in the dataset.
    """
    # Imported lazily so the CLI stays fast for subcommands that do not
    # need the training stack.
    from whisper_finetuning.train_whisper import train_whisper

    # click's multiple=True yields a tuple; an empty tuple means the user
    # gave no --languages, which we map to None for auto-detection.
    languages_list = list(languages) if languages else None

    # The returned trainer is not used here (underscore-prefixed to make
    # that explicit); we only report the test-set word error rate.
    _trainer, test_results = train_whisper(
        dataset_path=dataset_path,
        model_name_or_path=model_name_or_path,
        output_dir=output_dir,
        languages=languages_list,
        max_samples_per_language=max_samples_per_language,
        train_batch_size=train_batch_size,
        eval_batch_size=eval_batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        learning_rate=learning_rate,
        warmup_steps=warmup_steps,
        num_train_epochs=num_train_epochs,
        max_steps=max_steps,
        gradient_checkpointing=gradient_checkpointing,
        fp16=fp16,
        save_steps=save_steps,
        eval_steps=eval_steps,
        logging_steps=logging_steps,
        push_to_hub=push_to_hub,
    )

    print(f"\nTraining completed! Model saved to {output_dir}")
    # assumes train_whisper's test results dict carries an 'eval_wer' key
    # expressed as a percentage — TODO confirm against train_whisper.
    print(f"Final test WER: {test_results['eval_wer']:.2f}%")


if __name__ == "__main__":
    # Allow running this module directly as the CLI entry point.
    cli()