from services.storage import AiocacheMemory, BaseStorage
from ai import NlpAbsModel, VitsAbsModel, ModelMap, ASRTranslate
from ai.hugging import get_nlp_model, get_tts_model, get_asr_model, check_hf_model
import hashlib, logging
import numpy as np
import os

logger = logging.getLogger(__name__)

class AiModelCache:
    """Async cache of loaded AI model instances, keyed by model directory.

    Wraps an AiocacheMemory client: each getter returns the cached instance
    for a model directory, loading it via the matching ai.hugging factory and
    caching it on a miss. `load_model` eagerly pre-loads a configured set.
    """

    def __init__(self, cache_client: AiocacheMemory):
        # Backing async key/value store; holds model instances keyed by model_dir.
        self.__client: AiocacheMemory = cache_client

    async def _get_or_load(self, model_dir: str, loader):
        """Return the cached instance for model_dir; on a miss, call loader(),
        cache the result under model_dir and return it."""
        model_ins = await self.__client.get_key(model_dir, None)
        if model_ins is None:
            model_ins = loader()
            await self.__client.set_key(model_dir, model_ins)
        return model_ins

    async def get_nlpAbsModel(self, model_dir: str, model_alias: str, device: str = "cuda") -> NlpAbsModel | None:
        """Get (or load and cache) the NLP translation model stored under model_dir."""
        return await self._get_or_load(model_dir, lambda: get_nlp_model(model_dir, model_alias, device))

    async def get_vitsAbsModel(self, model_dir: str, model_alias: str, device: str = "cuda") -> VitsAbsModel | None:
        """Get (or load and cache) the TTS (VITS) model stored under model_dir."""
        return await self._get_or_load(model_dir, lambda: get_tts_model(model_dir, model_alias, device))

    async def get_asrAbsModel(self, model_dir: str, model_alias: str, device: str = "cuda") -> ASRTranslate | None:
        """Get (or load and cache) the ASR model stored under model_dir."""
        return await self._get_or_load(model_dir, lambda: get_asr_model(model_dir, model_alias, device))

    def get_hashed_name(self, name: str) -> str:
        """Return the hex SHA-256 digest of name (stable key-hashing helper)."""
        return hashlib.sha256(name.encode()).hexdigest()

    async def load_model(self, device: str, **loadModelConfig) -> None:
        """Eagerly check, load and cache the models named in loadModelConfig.

        Recognized keys (e.g. {'nlp': 'nllb', 'tts': 'mms', 'asr': 'faster-whisper'}):
            nlp          -- alias of the translation model
            tts          -- alias of the TTS model
            tts_language -- language variant for the TTS model (default 'English')
            asr          -- alias of the ASR model
        Any other keys are silently ignored.

        NOTE(review): these calls pass an explicit third argument (presumably
        TTL=None) to set_key, while the lazy getters omit it — confirm both
        mean "no expiry" in AiocacheMemory.
        """
        nlp_model_alias = loadModelConfig.pop('nlp', None)
        tts_model_alias = loadModelConfig.pop('tts', None)
        tts_language = loadModelConfig.pop('tts_language', 'English')
        asr_model_alias = loadModelConfig.pop('asr', None)

        if nlp_model_alias:
            logger.info('load nlp model, for alias:%s', nlp_model_alias)
            nlp_model_dir = ModelMap.get(nlp_model_alias, "facebook/nllb-200-distilled-600M")
            check_hf_model(model_dir=nlp_model_dir)  # make sure the weights are available locally
            nlp_model_ins = get_nlp_model(nlp_model_dir, nlp_model_alias, device)
            await self.__client.set_key(nlp_model_dir, nlp_model_ins, None)

        if tts_model_alias:
            logger.info('load tts model, for alias:%s, language:%s', tts_model_alias, tts_language)
            # TTS checkpoints are per-language, hence the "<alias>-<language>" map key.
            tts_model_dir = ModelMap.get(f"{tts_model_alias}-{tts_language}", "facebook/mms-tts-eng")
            check_hf_model(model_dir=tts_model_dir)
            tts_model_ins = get_tts_model(tts_model_dir, tts_model_alias, device)
            await self.__client.set_key(tts_model_dir, tts_model_ins, None)

        if asr_model_alias:
            logger.info('load asr model, for alias:%s', asr_model_alias)
            asr_model_dir = ModelMap.get(asr_model_alias, "Systran/faster-whisper-large-v3")
            check_hf_model(model_dir=asr_model_dir)
            asr_model_ins = get_asr_model(asr_model_dir, asr_model_alias, device)
            await self.__client.set_key(asr_model_dir, asr_model_ins, None)

    def get_cache_client(self) -> AiocacheMemory:
        """Expose the underlying cache client for callers that manage the cache directly."""
        return self.__client


def save_wav_tourl(nyArr: np.ndarray, storage: BaseStorage) -> tuple[str, str]:
    """Normalize a float waveform to 16-bit PCM, upload it as a wav file and
    return (public URL, file name without the ".wav" extension).

    The upload options assume a 16000 Hz, single-channel signal.
    """
    peak = np.max(np.abs(nyArr))
    if peak == 0:
        # All-silent buffer: dividing by a zero peak would produce NaN/inf and
        # an undefined int16 result, so emit digital silence instead.
        data = np.zeros_like(nyArr, dtype=np.int16)
    else:
        # Convert 32-bit float samples to full-scale 16-bit integers.
        data = np.int16(nyArr / peak * 32767)
    option: dict = {"rate": 16000, "channel": 1}
    saveFilePath, _ = storage.upload("voice", data.T, "wav", **option)
    # For local storage the site's own base address is prepended (2024-11-29).
    return storage.geturl(saveFilePath), os.path.basename(saveFilePath)[:-4]

def chat_stream_txt(prompt: str, language: str, nlp_model: NlpAbsModel, tts_model: VitsAbsModel, storage: BaseStorage) -> dict:
    """Translate `prompt` into `language`, synthesize the translation to
    speech and upload the wav.

    Returns {"text": translated text, "language": target language,
             "path": URL of the uploaded wav, "filename": file name}.
    """
    translated = nlp_model.translation(prompt, language)
    waveform: np.ndarray = tts_model.generateNpArr(translated)
    # For local storage the site's own base address is prepended (2024-11-29).
    audio_url, audio_name = save_wav_tourl(waveform, storage)
    return {
        'text': translated,
        'language': language,
        'path': audio_url,
        'filename': audio_name,
    }

def chat_stream_voice(input_audio_file: str, language: str, asr_model: ASRTranslate, nlp_model: NlpAbsModel, tts_model: VitsAbsModel, storage: BaseStorage) -> dict:
    """Transcribe the audio file, then run the text pipeline
    (translate -> TTS -> upload); same return shape as chat_stream_txt."""
    transcript = asr_model.transcribe(input_audio_file)
    return chat_stream_txt(transcript, language, nlp_model, tts_model, storage)

def tts_stream(prompt: str, tts_model: VitsAbsModel) -> bytes:
    """Synthesize `prompt` with the TTS model and return raw 16-bit PCM bytes."""
    nyArr: np.ndarray = tts_model.generateNpArr(prompt)
    peak = np.max(np.abs(nyArr))
    if peak == 0:
        # All-silent output: avoid the NaN/inf result of dividing by a zero peak.
        return np.zeros_like(nyArr, dtype=np.int16).tobytes()
    # Scale float samples to full-scale 16-bit integers before serializing.
    return np.int16(nyArr / peak * 32767).tobytes()