import whisper
import torch
from pathlib import Path
from typing import List, Union
import numpy as np
from transformers import pipeline
import os
from datasets import load_dataset
import re

class AudioTranslator:
    """End-to-end speech translation: transcribe, translate, re-synthesize.

    Pipeline stages:
      1. OpenAI Whisper ("base") for speech-to-text.
      2. Meta NLLB-200 (distilled, 600M) for text-to-text translation.
      3. Microsoft SpeechT5 for text-to-speech synthesis.
    """

    def __init__(self):
        """Initialize the audio translator with required models.

        NOTE: downloads model weights from the Hugging Face hub on first
        run; requires network access and may take a while.
        """
        # Load Whisper model for speech recognition.
        self.whisper_model = whisper.load_model("base")

        # First CUDA device when available, otherwise CPU (-1).
        device = 0 if torch.cuda.is_available() else -1

        # Text-to-text translation pipeline (NLLB-200 distilled).
        self.translator = pipeline(
            "translation",
            model="facebook/nllb-200-distilled-600M",
            device=device,
        )

        # Text-to-speech pipeline (SpeechT5).
        self.tts = pipeline(
            "text-to-speech",
            model="microsoft/speecht5_tts",
            device=device,
        )

        # SpeechT5 needs an x-vector speaker embedding to select a voice;
        # index 7306 of the CMU ARCTIC x-vectors is the speaker used in
        # the official SpeechT5 examples.
        embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
        self.speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

        # Maximum characters per TTS request; kept below the model's
        # ~600-token limit for safety.
        self.max_text_length = 500

        # ISO 639-1 code -> NLLB (FLORES-200) language code.
        self.lang_code_map = {
            "en": "eng_Latn",
            "zh": "zho_Hans",
            "es": "spa_Latn",
            "fr": "fra_Latn",
            "de": "deu_Latn",
            "it": "ita_Latn",
            "ja": "jpn_Jpan",
            "ko": "kor_Hang",
            "ru": "rus_Cyrl",
            "ar": "arb_Arab",
            "hi": "hin_Deva",
            "pt": "por_Latn",
        }

    def split_text_into_chunks(self, text: str) -> List[str]:
        """
        Split text into chunks no longer than ``self.max_text_length``.

        Splits at sentence boundaries when possible, falling back to word
        boundaries for over-long sentences. Never emits empty chunks, and
        the joining space is counted toward the length limit. A single
        token longer than the limit cannot be split further and is emitted
        as its own (over-long) chunk.

        Args:
            text: Input text to split.

        Returns:
            List of non-empty text chunks.
        """
        # Split on sentence-ending punctuation, keeping the punctuation
        # attached to its sentence.
        parts = re.split(r'([.!?]+)', text)
        sentences = [''.join(pair) for pair in zip(parts[0::2], parts[1::2] + [''])]
        sentences = [s.strip() for s in sentences if s.strip()]

        chunks: List[str] = []
        current = ""

        for sentence in sentences:
            # +1 accounts for the joining space when current is non-empty.
            needed = len(sentence) + (1 if current else 0)
            if len(current) + needed <= self.max_text_length:
                current = f"{current} {sentence}" if current else sentence
                continue

            # Flush the chunk in progress before starting a new one.
            if current:
                chunks.append(current)
                current = ""

            if len(sentence) <= self.max_text_length:
                current = sentence
                continue

            # Sentence alone exceeds the limit: fall back to word boundaries.
            for word in sentence.split():
                if len(word) > self.max_text_length:
                    # An indivisible over-long token: flush and emit it alone.
                    if current:
                        chunks.append(current)
                        current = ""
                    chunks.append(word)
                elif current and len(current) + 1 + len(word) > self.max_text_length:
                    chunks.append(current)
                    current = word
                else:
                    current = f"{current} {word}" if current else word

        # Add the last chunk if it's not empty.
        if current:
            chunks.append(current)

        return chunks

    def synthesize_speech(
        self,
        text: str,
        output_path: Union[str, Path]
    ) -> Path:
        """
        Convert text to speech and write it to a 16 kHz WAV file.

        Long input is synthesized chunk by chunk (see
        ``split_text_into_chunks``) and the waveforms are concatenated.
        Chunks that fail to synthesize are skipped with a warning rather
        than aborting the whole request (best-effort).

        Args:
            text: Input text.
            output_path: Path to save the audio file.

        Returns:
            Path to the generated audio file.

        Raises:
            ValueError: If no chunk could be synthesized at all.
        """
        output_path = Path(output_path)

        audio_chunks = []
        for chunk in self.split_text_into_chunks(text):
            try:
                result = self.tts(chunk, forward_params={"speaker_embeddings": self.speaker_embeddings})
                # The pipeline returns a dict or a one-element list of dicts
                # depending on the transformers version.
                if isinstance(result, dict):
                    speech = result["audio"]
                elif isinstance(result, list) and result:
                    speech = result[0]["audio"]
                else:
                    raise ValueError(f"Unexpected TTS output format: {type(result)}")

                audio_chunks.append(speech)
            except Exception as e:
                print(f"Error processing chunk: {chunk[:50]}...")
                print(f"Error details: {str(e)}")
                continue

        if not audio_chunks:
            raise ValueError("No audio was generated from the text")

        combined_audio = np.concatenate(audio_chunks)

        # Imported lazily so the class is usable without libsndfile installed.
        import soundfile as sf
        # SpeechT5 produces 16 kHz audio.
        sf.write(str(output_path), combined_audio, samplerate=16000)

        return output_path

    def transcribe_audio(self, audio_path: Union[str, Path]) -> str:
        """
        Transcribe audio to text.

        NOTE(review): Whisper transcribes in the *spoken* language — it does
        not translate to English. The detected language is available in
        ``result["language"]`` if callers need it.

        Args:
            audio_path: Path to the audio file.

        Returns:
            Transcribed text.
        """
        result = self.whisper_model.transcribe(str(audio_path))
        return result["text"]

    def translate_text(self, text: str, target_language: str, source_language: str = "en") -> str:
        """
        Translate text to target language.

        Args:
            text: Input text.
            target_language: Target language code (e.g., 'en', 'zh', 'es').
            source_language: Source language code; defaults to 'en' for
                backward compatibility with the previous hard-coded source.

        Returns:
            Translated text.

        Raises:
            ValueError: If either language code is not supported.
        """
        try:
            # Convert ISO 639-1 codes to NLLB (FLORES-200) format.
            src_lang = self.lang_code_map.get(source_language.lower())
            tgt_lang = self.lang_code_map.get(target_language.lower())

            if not src_lang:
                raise ValueError(f"Unsupported source language: {source_language}")
            if not tgt_lang:
                raise ValueError(f"Unsupported target language: {target_language}")

            # NOTE(review): max_length=512 may truncate very long
            # transcripts; consider chunking the input before translating.
            result = self.translator(
                text,
                src_lang=src_lang,
                tgt_lang=tgt_lang,
                max_length=512
            )

            # Handle different possible output formats across versions.
            if isinstance(result, list) and result:
                if "translation_text" in result[0]:
                    return result[0]["translation_text"]
                elif "generated_text" in result[0]:
                    return result[0]["generated_text"]
            elif isinstance(result, dict):
                if "translation_text" in result:
                    return result["translation_text"]
                elif "generated_text" in result:
                    return result["generated_text"]

            # Fall back to the raw result if the format is unrecognized.
            return str(result[0]) if isinstance(result, list) and result else str(result)

        except Exception as e:
            print(f"Translation error: {str(e)}")
            print(f"Input text: {text[:100]}...")
            raise

    def translate_and_synthesize(
        self,
        audio_path: Union[str, Path],
        target_language: str
    ) -> Path:
        """
        Translate audio and generate speech in target language.

        The output file is written next to the input as
        ``translated_<input name>``.

        Args:
            audio_path: Path to the input audio file.
            target_language: Target language code.

        Returns:
            Path to the translated audio file.
        """
        # Transcribe original audio.
        text = self.transcribe_audio(audio_path)

        # Translate text.
        # NOTE(review): assumes the input audio is English (the default
        # source). For other inputs, pass Whisper's detected language as
        # source_language — TODO confirm desired behavior.
        translated_text = self.translate_text(text, target_language)

        # Generate speech in target language.
        audio_path = Path(audio_path)
        output_path = audio_path.parent / f"translated_{audio_path.name}"

        return self.synthesize_speech(translated_text, output_path)