from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    M2M100ForConditionalGeneration,
    M2M100Tokenizer,
    pipeline,
    VitsModel,
    PreTrainedTokenizerBase,
    VitsTokenizer,
)
import numpy as np
import logging
import torch
import os, platform, io
import threading
from faster_whisper import WhisperModel
import whisper
from ai import NlpAbsModel, VitsAbsModel, ASRTranslate
from config import settings
from huggingface_hub import snapshot_download
import ffmpeg
from typing import Union

logger = logging.getLogger(__name__)


class NLLBModel(NlpAbsModel):
    """Translation backend built on Meta's NLLB seq2seq model."""

    def __init__(self, model: AutoModelForSeq2SeqLM, tokenizer: AutoTokenizer, device: str):
        self.model: AutoModelForSeq2SeqLM = model
        self.tokenizer: AutoTokenizer = tokenizer
        self.device = device

    def translation(self, prompt: str, language: str = "English") -> str:
        """Translate a single prompt into the given target language."""
        target_code = NLLBModel.nllb_lang_code(language)
        logger.debug("[NLLB]input:%s, output:%s", language, target_code)

        encoded = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        # The `lang_code_to_id` attribute is deprecated (removed in
        # transformers v4.38); `convert_tokens_to_ids` is the supported way
        # to resolve the forced BOS token (e.g. "eng_Latn").
        generated = self.model.generate(
            **encoded,
            forced_bos_token_id=self.tokenizer.convert_tokens_to_ids(target_code),
            max_length=300,
        )
        decoded = self.tokenizer.batch_decode(generated, skip_special_tokens=True)
        return decoded[0]

    def batch_translation(self, prompts: list[str], language: str = "English") -> list[str]:
        """Translate a batch of Simplified-Chinese prompts via a translation pipeline."""
        target_code = NLLBModel.nllb_lang_code(language)
        translator = pipeline(
            task="translation",
            model=self.model,
            tokenizer=self.tokenizer,
            src_lang="zho_Hans",
            tgt_lang=target_code,
            device=self.device,
        )
        outputs = translator(prompts)
        return [item["translation_text"] for item in outputs if "translation_text" in item]

    def getDevice(self) -> str:
        return self.device

    # Maps e.g. "English" -> "eng_Latn".
    # see: https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200
    @staticmethod
    def nllb_lang_code(language: str) -> str:
        """
        Map a language name to the NLLB (FLORES-200) language code.
        Args:
            language (str): language name, e.g. English, Thai, Arabic.

        Returns:
            str: NLLB language code, e.g. eng_Latn, tha_Thai, arb_Arab.
                 Unknown names fall back to "eng_Latn".
        """
        codes = {"English": "eng_Latn", "Thai": "tha_Thai", "Arabic": "arb_Arab"}
        return codes.get(language, "eng_Latn")


class M2M100Model(NlpAbsModel):
    """Translation backend built on Facebook's M2M100 multilingual model."""

    def __init__(self, model: M2M100ForConditionalGeneration, tokenizer: M2M100Tokenizer, device: str,):
        self.model: M2M100ForConditionalGeneration = model
        self.tokenizer: M2M100Tokenizer = tokenizer
        self.device = device

    def translation(self, prompt: str, language: str = "English") -> str:
        """Translate a single prompt into the given target language."""
        target_code = M2M100Model.m2m_lang_code(language)
        logger.debug("[M2M]input:%s, output:%s", language, target_code)

        encoded = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        tokens = self.model.generate(
            **encoded, forced_bos_token_id=self.tokenizer.get_lang_id(target_code)
        )
        decoded = self.tokenizer.batch_decode(tokens, skip_special_tokens=True)
        return decoded[0]

    def batch_translation(self, prompts: list[str], language: str = "English") -> list[str]:
        """Translate a batch of Chinese prompts via a translation pipeline."""
        target_code = M2M100Model.m2m_lang_code(language)
        translator = pipeline(
            task="translation",
            model=self.model,
            tokenizer=self.tokenizer,
            src_lang="zh",
            tgt_lang=target_code,
            device=self.device,
        )
        outputs = translator(prompts)
        return [item["translation_text"] for item in outputs if "translation_text" in item]

    def getDevice(self) -> str:
        return self.device

    # Maps e.g. "English" -> "en".
    @staticmethod
    def m2m_lang_code(language: str) -> str:
        """
        Map a language name (e.g. "English") to an M2M100 ISO 639-1 code
        (e.g. "en"). Unknown names fall back to "en".

        The full list of ~100 supported codes (af, am, ar, ..., zh, zu) is on
        the model card: https://huggingface.co/facebook/m2m100_418M
        """
        codes = {"English": "en", "Thai": "th", "Arabic": "ar"}
        return codes.get(language, "en")


# Text-to-speech backend using the Hugging Face VITS implementation.
class VitsHuggingModel(VitsAbsModel):

    def __init__(self, model: VitsModel, tokenizer: PreTrainedTokenizerBase, device: str):
        self.model = model
        self.tokenizer = tokenizer
        self.device = device

    def generateNpArr(self, prompt: str) -> np.ndarray:
        """
        Synthesize speech for `prompt`.

        Returns:
            np.ndarray: float32 waveform of shape (1, num_samples),
                        e.g. (1, 111104).
        Raises:
            ValueError: when `prompt` is None.
        """
        logger.debug(f"VHM-1: arg prompt: {prompt}")
        if prompt is None:
            logger.debug("VHM-2: return Mute")
            raise ValueError("prompt is Lost")

        logger.debug("VHM-3: generate tts numpy array")
        tokens = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        # Inference only: no autograd bookkeeping needed.
        with torch.no_grad():
            waveform = self.model(**tokens).waveform
            return waveform.cpu().numpy()

    def getDevice(self) -> str:
        return self.device


# Speech recognition backend built on faster-whisper.
class FasterWhisperTranslate(ASRTranslate):

    def __init__(self, fasterModel: WhisperModel, enableVAD: bool = True):
        """
        Initialize the recognizer.
        Args:
            fasterModel (WhisperModel): loaded faster-whisper model.
            enableVAD (bool, optional): run silero-vad human-voice detection
                before transcribing. Defaults to True.
        """
        # Work around "Initializing libiomp5md.dll, but found libiomp5md.dll
        # already initialized" caused by duplicate OpenMP runtimes on Windows.
        if platform.system() == "Windows":
            os.environ["KMP_DUPLICATE_LIB_OK"] = "True"

        self.model: WhisperModel = fasterModel
        # Serializes model access across threads for this instance.
        self.lockRIns = threading.RLock()
        self.silero_vad_model = self.__loadVADModel() if enableVAD else None

    # Transcribe an audio file on disk (av.audio.resampler.AudioResampler).
    def transcribe(self, path: str) -> str:
        logger.debug("F7.0) asr wav file")
        # BUGFIX: the VAD early-return previously happened *after* acquiring
        # lockRIns and returned without releasing it, leaking the lock.
        # Check VAD first (as the other transcribe_* methods do).
        if self.silero_vad_model and not self.__vad_man_sound(path):
            return ""

        data = self.__asr(path)
        logger.debug("F7.1) asr statement:%s", data)
        return data

    # numpy transcription; the array must look like:
    # length:26624, shape:(26624,), dtype:float32
    def transcribe_array(self, nyArr: np.ndarray) -> str:
        """
        Transcribe a raw waveform.
        audio.py line69 audio.astype(np.float32) / 32768.0
        Unexpected input data type. Actual: (tensor(int16)) , expected: (tensor(float))
        Args:
            nyArr (np.ndarray): mono float32 samples, e.g. shape (26624,).

        Returns:
            str: recognized text ("" when VAD finds no speech).
        """
        logger.debug("F7.0) asr numpy array")
        if self.silero_vad_model and not self.__vad_man_sound(nyArr):
            return ""

        data = self.__asr(nyArr)
        logger.debug("F7.1) asr statement:%s", data)
        return data

    # Transcribe in-memory wav bytes (av.audio.resampler.AudioResampler).
    def transcribe_bytes(self, bydata: io.BytesIO) -> str:
        logger.debug("F7.0) asr wav file bytes")
        if self.silero_vad_model and not self.__vad_man_sound(bydata):
            return ""

        data = self.__asr(bydata)
        logger.debug("F7.1) asr statement:%s", data)
        return data

    def __asr(self, audio) -> str:
        """Run faster-whisper on `audio` while holding the instance lock."""
        # BUGFIX: use `with` so the lock is released even when transcribe
        # raises; the old acquire()/release() pairs leaked it on exceptions.
        with self.lockRIns:
            segments, _ = self.model.transcribe(
                audio,
                language="zh",
                vad_filter=True,
                vad_parameters=dict(min_silence_duration_ms=1000),
            )
            # `segments` is lazy; consume it while still holding the lock.
            return "".join(segment.text for segment in segments)

    def __loadVADModel(self):
        # Imported lazily so silero_vad is only required when VAD is enabled.
        from silero_vad import load_silero_vad

        return load_silero_vad()

    # Blank-audio detection: avoids hallucinated words on silent input.
    def __vad_man_sound(self, audioForm: Union[str, np.ndarray, io.BytesIO], sample_rate: int = 16000) -> bool:
        """
        Human-voice detection.
        Args:
            audioForm: file path, mono waveform array, or in-memory wav bytes.
            sample_rate (int): audio sampling rate.
        Returns:
            bool: True when speech is present.
        """
        from silero_vad import read_audio, get_speech_timestamps

        if isinstance(audioForm, str):
            wav = read_audio(audioForm)  # needs an audio backend (sox, soundfile, or ffmpeg)
        elif isinstance(audioForm, np.ndarray):
            wav = torch.from_numpy(audioForm)
        elif isinstance(audioForm, io.BytesIO):
            npArr = WhisperTranslate.load_audio(audioForm.getvalue())
            wav = torch.from_numpy(npArr)
        else:
            raise RuntimeError(f"unsupported audio form: {type(audioForm)!r}")
        # Observed cost on long clips: [3.15s -> 33.13s].
        speech_timestamps = get_speech_timestamps(
            audio=wav,
            model=self.silero_vad_model,
            sampling_rate=sample_rate,
            return_seconds=True,
        )
        # BUGFIX: return an actual bool as the signature promises, instead of
        # leaking the raw timestamp list.
        return len(speech_timestamps) > 0

    # Dispose hook.
    def _close(self):
        logger.debug("[FasterWhisperTranslate]close")


# Keep a single instance process-wide: sharing one model between concurrently
# created instances can trigger tensor errors under multi-threading.
# Speech recognition backend built on OpenAI's Whisper.
class WhisperTranslate(ASRTranslate):

    def __init__(self, whisModel: whisper.model.Whisper, outChinese: bool = False):
        """
        Initialize the recognizer.
        Args:
            whisModel (whisper.model.Whisper): loaded Whisper model.
            outChinese (bool, optional): force Chinese output. Defaults to False.
        """
        self.model: whisper.model.Whisper = whisModel
        self.lockRIns = threading.RLock()
        # With outChinese=True the output is Chinese but may be Traditional.
        self.langDict = {"language": "Chinese"} if outChinese else {}

    # Transcribe an audio file (requires the ffmpeg CLI in PATH).
    def transcribe(self, path: str) -> str:
        # BUGFIX: use `with` so the lock is released even when transcribe
        # raises; the old acquire()/release() pair leaked it on exceptions.
        with self.lockRIns:
            result = self.model.transcribe(path, fp16=False, **self.langDict)
        return result["text"]

    # numpy transcription (torch.from_numpy); the array must look like:
    # length:26624, shape:(26624,), dtype:float32
    def transcribe_array(self, nyArr: np.ndarray) -> str:
        """
        Transcribe a raw waveform.
        audio.py line47 np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
        Args:
            nyArr (np.ndarray): mono float32 samples, e.g. shape (26624,).

        Returns:
            str: recognized text.
        """
        # BUGFIX: exception-safe locking (see transcribe()).
        with self.lockRIns:
            result = self.model.transcribe(nyArr, word_timestamps=False, fp16=False, **self.langDict)
        return result["text"]

    def transcribe_bytes(self, bydata: io.BytesIO) -> str:
        # Decode the container bytes to a 16 kHz mono waveform first.
        npArr = WhisperTranslate.load_audio(bydata.getvalue())
        return self.transcribe_array(npArr)

    def _close(self):
        self.lockRIns = None
        self.model = None

    # @see: https://github.com/openai/whisper/discussions/908
    @staticmethod
    def load_audio(file_bytes: bytes, sr: int = 16_000) -> np.ndarray:
        """
        Use file's bytes and transform to mono waveform, resampling as necessary
        Parameters
        ----------
        file_bytes: bytes
            The bytes of the audio file
        sr: int
            The sample rate to resample the audio if necessary
        Returns
        -------
        A NumPy array containing the audio waveform, in float32 dtype.
        Raises
        ------
        RuntimeError when ffmpeg fails to decode the input.
        """
        # Launch an ffmpeg subprocess to decode while down-mixing to mono and
        # resampling. loglevel=quiet suppresses console noise
        # (@see: https://stackoverflow.com/questions/62968888).
        process = (
            ffmpeg.input("pipe:", threads=0)
            .output(
                "pipe:",
                format="s16le",
                acodec="pcm_s16le",
                ac=1,
                ar=sr,
                loglevel="quiet",
            )
            .run_async(pipe_stdin=True, pipe_stdout=True, pipe_stderr=True)
        )
        out, err = process.communicate(input=file_bytes)
        # BUGFIX: run_async() never raises ffmpeg.Error on decode failure
        # (and e.stderr would have been None since stderr was not piped), so
        # bad input went undetected; check the exit code explicitly instead.
        if process.returncode != 0:
            detail = err.decode(errors="replace") if err else "unknown ffmpeg error"
            raise RuntimeError(f"Failed to load audio: {detail}")

        # s16le PCM -> float32 in [-1, 1).
        return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0


def check_hf_model(model_dir: str, hf_endpoint: str = "https://hf-mirror.com"):
    """Ensure the Hugging Face repo `model_dir` is in the local cache,
    downloading it (via `hf_endpoint` when no endpoint is configured) if missing."""
    # Silence the symlink warning on Windows.
    # @see: https://github.com/huggingface/huggingface_hub/issues/2179
    if platform.system() == "Windows" and os.environ.get("HF_HUB_DISABLE_SYMLINKS_WARNING") is None:
        os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "true"

    # Cached repos live in directories named 'models--{username_or_org}--{repo_name}'.
    cache_dir_name = "models--" + model_dir.replace("/", "--")
    cache_path = os.path.join(settings.MODEL_PATH, cache_dir_name)
    if not os.path.exists(cache_path):
        if os.environ.get("HF_ENDPOINT") is None:
            os.environ["HF_ENDPOINT"] = hf_endpoint
        snapshot_download(repo_id=model_dir)


def get_nlp_model(model_dir: str, model_alias: str, device: str = "cuda") -> NlpAbsModel | None:
    """Load a translation backend by alias ("nllb" or "m2m100"); None for unknown aliases."""
    if model_alias == "nllb":
        nllb_model = AutoModelForSeq2SeqLM.from_pretrained(
            model_dir, trust_remote_code=True, cache_dir=settings.MODEL_PATH
        ).to(device)
        nllb_tokenizer = AutoTokenizer.from_pretrained(
            model_dir,
            src_lang="zho_Hans",
            trust_remote_code=True,
            cache_dir=settings.MODEL_PATH,
        )
        return NLLBModel(nllb_model, nllb_tokenizer, device)

    if model_alias == "m2m100":
        m2m_model = M2M100ForConditionalGeneration.from_pretrained(
            model_dir, cache_dir=settings.MODEL_PATH
        ).to(device)
        m2m_tokenizer = M2M100Tokenizer.from_pretrained(model_dir, cache_dir=settings.MODEL_PATH)
        # Source language is always Simplified Chinese in this module.
        m2m_tokenizer.src_lang = "zh"
        return M2M100Model(m2m_model, m2m_tokenizer, device)

    return None


def get_tts_model(model_dir: str, model_alias: str, device: str = "cuda") -> VitsAbsModel | None:
    """
    Load a TTS backend by alias.

    Args:
        model_dir (str): HF repo id, e.g. "facebook/mms-tts-deu".
        model_alias (str): currently only "mms" is supported.
        device (str): torch device string. Defaults to "cuda".

    Returns:
        VitsAbsModel | None: wrapper instance, or None for an unknown alias.
    """
    if model_alias == "mms":
        model = VitsModel.from_pretrained(model_dir, cache_dir=settings.MODEL_PATH).to(device)
        # BUGFIX: pass cache_dir so the tokenizer is cached under
        # settings.MODEL_PATH like every other loader in this module,
        # instead of silently landing in the default HF cache.
        tokenizer = VitsTokenizer.from_pretrained(model_dir, cache_dir=settings.MODEL_PATH)
        return VitsHuggingModel(model, tokenizer, device)
    else:
        # https://huggingface.co/spaces/ankush13r/faster-whisper/blob/main/app.py
        return None


def get_asr_model(
    model_dir: str, model_alias: str, device: str = "cuda"
) -> ASRTranslate | None:
    """Load an ASR backend by alias ("whisper" or "faster-whisper"); None for unknown aliases."""
    if model_alias == "whisper":
        cache_name = "models--{}".format(model_dir.replace("/", "--"))
        openai_model = whisper.load_model(
            "large-v3",
            download_root=f"{settings.MODEL_PATH}/{cache_name}/",
            device=device,
        )
        return WhisperTranslate(openai_model)

    if model_alias == "faster-whisper":
        # Duplicate-OpenMP-runtime workaround for Windows.
        if platform.system() == "Windows":
            os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
        faster_model = WhisperModel(
            device=device,
            model_size_or_path=model_dir,
            download_root=settings.MODEL_PATH,
        )
        return FasterWhisperTranslate(faster_model)

    # Could not locate cudnn_ops64_9.dll. Please make sure it is in your library path!
    # https://github.com/SYSTRAN/faster-whisper/issues/1080
    return None
