import click
import os
from tts_eval.entrypoints.generate_png import generate_radar_chart
from tts_eval.entrypoints.generate_tts_evaluation_data import generate_all_data_to_excel
from tts_eval.providers.openai import OpenAIProvider
from tts_eval.providers.humeai import HumeAIProvider
from tts_eval.providers.deepspeech import DeepSpeechProvider


@click.group()
def cli():
    """Command line interface for TTS evaluation tools."""
    # Root click group; subcommands register themselves via @cli.command(...)
    # below. The body is intentionally empty — click handles dispatch.
    pass


@cli.command("generate-data")
@click.option(
    "--output",
    "-o",
    default="tts_evaluation_results.xlsx",
    help="Output Excel file path",
)
def generate_data(output):
    """Generate TTS evaluation raw data and save to Excel file."""
    # Thin wrapper: all work (data collection and Excel serialization) is
    # delegated to generate_all_data_to_excel.
    generate_all_data_to_excel(output_file=output)


@cli.command("generate-chart")
@click.argument("data_file", type=click.Path(exists=True))
@click.option(
    "--output", "-o", default="radar_chart.png", help="Output image file path"
)
def chart(data_file, output):
    """Generate a radar chart from TTS evaluation data file."""
    # Thin wrapper: click validates that data_file exists; chart rendering is
    # delegated to generate_radar_chart.
    generate_radar_chart(data_file, output)


@cli.command("synthesize")
@click.option("--provider", "-p", type=click.Choice(["openai", "humeai", "qwen"]), required=True, help="TTS provider to use")
@click.option("--text", "-t", help="Text to synthesize")
@click.option("--input-file", "-i", type=click.Path(exists=True), help="Input text file (one utterance per line)")
@click.option("--output", "-o", required=True, help="Output file path or directory")
@click.option("--voice", help="Voice to use (for OpenAI or Qwen)")
@click.option("--voice-description", help="Voice description (for HumeAI)")
@click.option("--model", default="tts-1", help="Model name (for OpenAI)")
@click.option("--instructions", help="Voice instructions (for OpenAI)")
@click.option("--audio-format", default="mp3", help="Audio format")
@click.option("--api-key", help="API key for the selected provider")
def synthesize(provider, text, input_file, output, voice, voice_description, model, instructions, audio_format, api_key):
    """Synthesize text to speech using specified provider."""
    # Exactly one input source (--text or --input-file) must be supplied.
    if not text and not input_file:
        raise click.UsageError("Either --text or --input-file must be provided")

    if text and input_file:
        raise click.UsageError("Only one of --text or --input-file should be provided")

    # Build the provider instance and the per-provider keyword arguments once,
    # so the two call paths (single text vs. file of utterances) are shared
    # instead of being duplicated three times per provider.
    if provider == "openai":
        # NOTE(review): --api-key is accepted but not forwarded here;
        # presumably OpenAIProvider reads it from the environment — confirm
        # and pass it through if the constructor supports it.
        tts_provider = OpenAIProvider()
        text_kwargs = {"voice": voice or "alloy", "model": model, "instructions": instructions}
        # The OpenAI single-text path historically did not receive
        # audio_format; only the batch path does. Preserved as-is.
        file_kwargs = {**text_kwargs, "audio_format": audio_format}
    elif provider == "humeai":
        tts_provider = HumeAIProvider(api_key=api_key)
        text_kwargs = file_kwargs = {"voice_description": voice_description, "audio_format": audio_format}
    else:
        # click.Choice guarantees the only remaining value is "qwen".
        # Imported lazily so the other providers work without Qwen's deps.
        from tts_eval.providers.tts import QwenTTSProvider
        tts_provider = QwenTTSProvider(api_key=api_key)
        text_kwargs = file_kwargs = {"voice": voice, "audio_format": audio_format}

    if text:
        tts_provider.synthesize_text(text=text, output_file=output, **text_kwargs)
        click.echo(f"Audio generated at: {output}")
    else:
        output_files = tts_provider.synthesize_from_file(
            input_file=input_file,
            output_dir=output,
            **file_kwargs,
        )
        click.echo(f"Generated {len(output_files)} audio files in: {output}")


@cli.command("transcribe")
@click.option("--provider", "-p", type=click.Choice(["deepspeech"]), required=True, help="STT provider to use")
@click.option("--audio", "-a", type=click.Path(exists=True), required=True, help="Audio file to transcribe")
@click.option("--output", "-o", help="Output text file path")
@click.option("--model", required=True, help="Path to DeepSpeech model file (.pbmm)")
@click.option("--scorer", help="Path to DeepSpeech scorer file (.scorer)")
def transcribe(provider, audio, output, model, scorer):
    """Transcribe speech to text using specified provider."""
    # Guard clause: "deepspeech" is currently the only supported provider
    # (also enforced by click.Choice above).
    if provider != "deepspeech":
        return

    stt = DeepSpeechProvider(model_path=model, scorer_path=scorer)

    with open(audio, 'rb') as audio_fh:
        raw_audio = audio_fh.read()

    # A real implementation would detect the file's actual sample rate;
    # a common fixed rate of 16 kHz is assumed here for simplicity.
    transcription = stt.speech_to_text(
        audio_bytes=raw_audio,
        sample_rate=16000,
    )

    if output:
        # Persist the transcript when an output path was requested …
        with open(output, 'w') as text_fh:
            text_fh.write(transcription)
        click.echo(f"Transcription saved to: {output}")
    else:
        # … otherwise print it straight to the terminal.
        click.echo(f"Transcription: {transcription}")


def main():
    """Entry point for the TTS evaluation application."""
    # Invoking the click group parses sys.argv and dispatches to the
    # matching subcommand.
    cli()


if __name__ == "__main__":
    main()