from .base import VoiceAssistant
import argparse
import os
import torch
import math
from loguru import logger
import torchaudio.compliance.kaldi as k
import librosa
from huggingface_hub import snapshot_download

# from src.models.src_osum.llm_asr.pipeline import inferencePipeline
from gxl_ai_utils.utils import utils_file
# try:
# from src.models.src_osum.utils.init_tokenizer import init_tokenizer
from gxl_ai_utils.config.gxl_config import GxlNode
# from src.models.src_osum.utils.init_model import init_model
import sys
sys.path.append(r"/mnt/sfs/asr/code/osum_xlgeng")
from wenet.utils.init_model import init_model
from wenet.utils.init_tokenizer import init_tokenizer
import logging
import torchaudio
# except ImportError:
#     pass
# Probe for Huawei Ascend support: importing torch_npu registers the "npu"
# backend with PyTorch as a side effect. The flag records whether it worked.
try:
    import torch_npu  # noqa: F401 -- imported only for its backend-registration side effect
    is_npu = True
except ImportError:
    is_npu = False
    print("torch_npu is not available. if you want to use npu, please install it.")


def do_TTS(model, device, text_for_tts):
    """Convert ``text_for_tts`` to speech tokens via the model's TTS head.

    The fixed Chinese instruction below tells the model to emit speech
    tokens for the given text; ``generate_tts`` returns a batch of results
    and we keep only the first one.
    """
    tts_instruction = "恳请将如下文本转换为其对应的语音token，力求生成最为流畅、自然的语音。"
    outputs = model.generate_tts(device=device, text=text_for_tts, prompt=tts_instruction)
    return outputs[0]

def do_chat_text2text(model, device, text_for_chat):
    """Run a text-in / text-out chat turn through the model.

    A plain-string ``device`` is treated as an NPU index and normalized to a
    ``torch.device`` before the call (NOTE(review): this branch assumes an
    NPU backend is available — confirm callers never pass strings on CUDA).
    Returns the first generated reply.
    """
    if isinstance(device, str):
        device = torch.device(f"npu:{int(device)}")
    replies = model.generate_text2text(device=device, text=text_for_chat)
    return replies[0]

class OsumTextAssistant(VoiceAssistant):
    """Text-only assistant backed by the OSUM multi-task speech/text model.

    Loads a fixed checkpoint/config pair from the shared filesystem and
    answers chat queries through the model's text2text head.
    """

    def __init__(self):
        # Fixed deployment artifacts; paths point at the shared cluster volume.
        checkpoint_path = "/mnt/sfs/asr/ckpt/qwen2_multi_task_4_6gpus_gxl_adapter/epoch_31_LLMinstruct_cosyvoice1_10Wtts_1WenTTS_2Khqtts_1KenS2S_3Ks2s_5Ws2t/step_59999.pt"
        config_path = "/mnt/sfs/asr/code/osum_xlgeng/examples/wenetspeech/whisper/conf/config_llm_huawei_instruct-version_cosyvoice1-token.yaml"
        configs = utils_file.load_dict_from_yaml(config_path)
        # init_model may rewrite the config dict while loading the checkpoint.
        self.model, configs = init_model(GxlNode({"checkpoint_path": checkpoint_path}), configs)
        self.tokenizer = init_tokenizer(configs)

    def generate_text(
            self,
            text,
            gpu_id,
    ):
        """Generate a chat reply for ``text`` on accelerator ``gpu_id``.

        Prefers the NPU backend when torch_npu imported successfully,
        otherwise falls back to CUDA.
        """
        backend = "npu" if is_npu else "cuda"
        device = torch.device(f"{backend}:{gpu_id}")
        model = self.model.to(device)
        return do_chat_text2text(device=device, model=model, text_for_chat=text)
