from abc import ABC, abstractmethod
import io
import numpy as np
import scipy.io.wavfile
from typing import Dict, Optional, Tuple, Literal
import torch
import time
from ipex_llm import optimize_model
from transformers import SpeechT5Processor, SpeechT5HifiGan, SpeechT5ForTextToSpeech
from datasets import load_dataset
import soundfile as sf
import logging
from pydub import AudioSegment
from config import config
from kokoro_tts import KokoroTTS, TTSConfig
from langdetect import detect
import re


logger = logging.getLogger("llm-service")

class AudioConverter:
    """Audio format conversion utility built on pydub (ffmpeg)."""

    # Export parameters per target format, built once at class-definition
    # time instead of on every call.  "adts" is ffmpeg's container name
    # for a raw AAC stream.
    _FORMAT_PARAMS = {
        "mp3": {"format": "mp3", "codec": "libmp3lame", "mime": "audio/mpeg"},
        "opus": {"format": "opus", "codec": "libopus", "mime": "audio/opus"},
        "aac": {"format": "adts", "codec": "aac", "mime": "audio/aac"},
        "flac": {"format": "flac", "codec": "flac", "mime": "audio/flac"},
    }

    @staticmethod
    def convert_format(
        wav_data: bytes,
        target_format: Literal["mp3", "opus", "aac", "flac"],
        sampling_rate: int
    ) -> Tuple[bytes, str]:
        """Convert WAV audio bytes to the target format.

        Args:
            wav_data: Complete WAV file contents.
            target_format: One of "mp3", "opus", "aac", "flac".
            sampling_rate: Output sampling rate, forwarded to ffmpeg via -ar.

        Returns:
            Tuple of (audio bytes, MIME type).  On any conversion failure
            the original WAV bytes are returned with MIME "audio/wav" —
            deliberate best-effort fallback; the error is logged.
        """
        try:
            # Load WAV data
            audio = AudioSegment.from_wav(io.BytesIO(wav_data))

            params = AudioConverter._FORMAT_PARAMS[target_format]

            # Convert to target format
            output = io.BytesIO()
            audio.export(
                output,
                format=params["format"],
                codec=params["codec"],
                parameters=["-ar", str(sampling_rate)]
            )
            # getvalue() returns the whole buffer regardless of position,
            # so no seek(0) is needed here.
            return output.getvalue(), params["mime"]
        except Exception as e:
            logger.error(f"Error converting audio format: {str(e)}")
            return wav_data, "audio/wav"

class BaseTTS(ABC):
    """Abstract interface all TTS backends must implement."""

    @abstractmethod
    def load_model(self):
        """Load the TTS model and any auxiliary resources into memory."""
        pass

    @abstractmethod
    def generate_speech(
        self,
        text: str,
        voice: str,
        # "wav" is a valid value: concrete implementations short-circuit
        # and return raw WAV bytes without transcoding.
        output_format: Literal["mp3", "opus", "aac", "flac", "wav"] = "mp3"
    ) -> Tuple[bytes, str]:
        """Generate speech from text.

        Args:
            text: Text to synthesize.
            voice: OpenAI-style voice name (mapped per backend).
            output_format: Desired audio container/codec.

        Returns:
            Tuple of (audio bytes, MIME type).
        """
        pass

    @property
    @abstractmethod
    def sampling_rate(self) -> int:
        """Sampling rate (Hz) of the audio this backend produces."""
        pass

class SpeechT5TTS(BaseTTS):
    """SpeechT5 TTS backend (16 kHz) optimized with IPEX-LLM."""

    def __init__(self):
        # All model handles are populated lazily by load_model().
        self.model = None
        self.processor = None
        self.vocoder = None
        self.embeddings_dataset = None
        self._sampling_rate = 16000

        # OpenAI voice name -> index into the CMU-Arctic x-vector dataset
        self.voice_mapping = {
            "alloy": 0,    # bdl (male)
            "echo": 1,     # clb (female)
            "fable": 2,    # jmk (male)
            "onyx": 3,     # ksp (male)
            "nova": 4,     # rms (male)
            "shimmer": 5,  # slt (female)
        }

    def load_model(self):
        """Load the SpeechT5 model, processor, vocoder and speaker embeddings."""
        logger.info("Loading SpeechT5 TTS models...")
        self.model = SpeechT5ForTextToSpeech.from_pretrained(config.TTS_MODEL_PATH)
        self.processor = SpeechT5Processor.from_pretrained(config.TTS_MODEL_PATH)
        self.vocoder = SpeechT5HifiGan.from_pretrained(config.TTS_VOCODER_PATH)
        self.embeddings_dataset = load_dataset(config.EMBEDDINGS_DATASET_NAME, split="validation")

        # With only one line to enable IPEX-LLM optimization on model
        # Skip optimizing these two modules to get higher audio quality
        # When running LLMs on Intel iGPUs for Windows users, we recommend setting `cpu_embedding=True` in the optimize_model function.
        # This will allow the memory-intensive embedding layer to utilize the CPU instead of iGPU.
        self.model = optimize_model(self.model, modules_to_not_convert=["speech_decoder_postnet.feat_out",
                                                            "speech_decoder_postnet.prob_out"])
        logger.info("SpeechT5 models loaded successfully")

    def _get_speaker_embedding(self, voice: str) -> np.ndarray:
        """Return the x-vector for the given OpenAI voice name.

        Unknown voice names fall back to speaker 0 (alloy/bdl).
        NOTE(review): the dataset yields the "xvector" column directly;
        presumably a list of floats — torch.tensor() in the caller accepts
        either, so the np.ndarray annotation is approximate.
        """
        speaker_id = self.voice_mapping.get(voice, 0)
        return self.embeddings_dataset[speaker_id]["xvector"]

    def generate_speech(
        self,
        text: str,
        voice: str,
        # Widened to include "wav", which the body explicitly handles below.
        output_format: Literal["mp3", "opus", "aac", "flac", "wav"] = "mp3"
    ) -> Tuple[bytes, str]:
        """Synthesize `text` with the speaker embedding mapped from `voice`.

        Returns:
            Tuple of (audio bytes, MIME type).
        """
        speaker_embeddings = torch.tensor(self._get_speaker_embedding(voice)).unsqueeze(0)
        inputs = self.processor(text=text, return_tensors="pt")

        with torch.inference_mode():
            st = time.time()
            speech = self.model.generate_speech(
                inputs["input_ids"],
                speaker_embeddings=speaker_embeddings,
                vocoder=self.vocoder
            )
            end = time.time()
            logger.info(f"Inference time: {end-st} s")

        # First save as WAV
        buffer = io.BytesIO()
        scipy.io.wavfile.write(buffer, rate=self.sampling_rate, data=speech.numpy())
        buffer.seek(0)
        wav_data = buffer.getvalue()

        # Convert to target format (WAV is returned as-is)
        if output_format != "wav":
            return AudioConverter.convert_format(wav_data, output_format, self.sampling_rate)

        return wav_data, "audio/wav"

    @property
    def sampling_rate(self) -> int:
        """Fixed SpeechT5 output rate: 16 kHz."""
        return self._sampling_rate

class KokoroTTSWrapper(BaseTTS):
    """Kokoro TTS backend (24 kHz) with automatic zh/en language detection."""

    def __init__(self):
        # Populated by load_model().
        self.model = None
        self._sampling_rate = 24000

    def _detect_language(self, text: str) -> str:
        """Detect the language of `text`.

        Returns:
            'zh' or 'en'; 'auto' when detection raises (passed through to
            the Kokoro model as-is).
        """
        try:
            # Any CJK unified ideograph means Chinese — cheaper and more
            # reliable than statistical detection for mixed text.
            if re.search(r'[\u4e00-\u9fff]', text):
                return 'zh'

            # Otherwise fall back to langdetect.
            lang = detect(text)
            if lang in ('zh-cn', 'zh-tw'):
                return 'zh'
            return 'en'
        except Exception as e:
            logger.warning(f"Language detection failed: {str(e)}, fallback to 'auto'")
            return 'auto'

    def load_model(self):
        """Instantiate the Kokoro model from the configured path (CPU)."""
        logger.info("Loading Kokoro TTS model...")
        tts_config = TTSConfig(
            model_path=config.KOKORO_MODEL_PATH,
            device="cpu"  # or "cuda" when running on a GPU
        )
        self.model = KokoroTTS(tts_config)
        logger.info("Kokoro TTS model loaded successfully")

    def generate_speech(
        self,
        text: str,
        voice: str,
        # Widened to include "wav", which the body explicitly handles below.
        output_format: Literal["mp3", "opus", "aac", "flac", "wav"] = "mp3"
    ) -> Tuple[bytes, str]:
        """Synthesize `text`, picking a Kokoro voice based on detected language.

        Returns:
            Tuple of (audio bytes, MIME type).

        Raises:
            Exception: re-raised after logging when synthesis fails.
        """
        try:
            # Detect the language to pick the right voice mapping.
            detected_lang = self._detect_language(text)
            logger.info(f"Detected language: {detected_lang} for text: {text[:50]}...")

            # Map the OpenAI voice name onto a Kokoro speaker for this language.
            voice_map = config.VOICE_MAPPINGS["kokoro_zh" if detected_lang == "zh" else "kokoro"]
            kokoro_voice = voice_map.get(voice, voice_map["default"])
            logger.info(f"Using Kokoro voice: {kokoro_voice} for OpenAI voice: {voice}")

            # Run synthesis.
            st = time.time()
            audio_array = self.model.generate_speech(
                text,
                speaker=kokoro_voice,
                language=detected_lang
            )
            end = time.time()
            logger.info(f"Inference time: {end-st} s")

            # Serialize to WAV bytes.
            buffer = io.BytesIO()
            scipy.io.wavfile.write(buffer, rate=self.sampling_rate, data=audio_array)
            buffer.seek(0)
            wav_data = buffer.getvalue()

            # Convert to target format (WAV is returned as-is).
            if output_format != "wav":
                return AudioConverter.convert_format(wav_data, output_format, self.sampling_rate)

            return wav_data, "audio/wav"

        except Exception as e:
            logger.error(f"Error generating speech with Kokoro: {str(e)}")
            raise

    @property
    def sampling_rate(self) -> int:
        """Fixed Kokoro output rate: 24 kHz."""
        return self._sampling_rate

class TTSFactory:
    """Creates and caches TTS backend instances, one per model name."""

    # Cache of loaded backends keyed by model name.
    _instances: Dict[str, BaseTTS] = {}

    @classmethod
    def get_tts(cls, model_name: str = "kokoro") -> BaseTTS:
        """Return the cached TTS backend for `model_name`, loading it on first use.

        Unknown model names fall back to Kokoro (with a warning).
        """
        cached = cls._instances.get(model_name)
        if cached is not None:
            return cached

        if model_name == "speecht5":
            instance = SpeechT5TTS()
        else:
            if model_name != "kokoro":
                logger.warning(f"Unknown model {model_name}, falling back to Kokoro")
            instance = KokoroTTSWrapper()

        instance.load_model()
        cls._instances[model_name] = instance
        return instance
