from .base import VoiceAssistant
import argparse
import os
import torch
import torch_npu
import math
from loguru import logger
import torchaudio.compliance.kaldi as k
import librosa
from huggingface_hub import snapshot_download

from src.models.src_osum.llm_asr.pipeline import inferencePipeline
# import sys
# sys.path.append("/mnt/sfs/asr/code/osum_xlgeng")
# from wenet.utils.init_model import init_model
# from gxl_ai_utils.config.gxl_config import GxlNode
# from gxl_ai_utils.utils import utils_file

class audioProcessor:
    """Compute Whisper-style log-mel spectrogram features from raw audio.

    Frontend parameters (400-point FFT, hop 160, 80 mel bins) match the
    Whisper feature extractor for 16 kHz input.
    """

    def __init__(self, chunk_size=16):
        # ``chunk_size`` is accepted for interface compatibility but is not
        # used by the current feature extraction.
        self.n_fft = 400
        self.hop_length = 160
        self.num_mel_bins = 80
        # Optional right-padding (in samples) applied before the STFT;
        # zero by default, so no padding is performed.
        self.padding = 0

    def get_feature(self, audio):
        """Return log-mel features for one utterance.

        Args:
            audio: dict with keys ``'array'`` (1-D waveform samples) and
                ``'sampling_rate'`` (int, Hz) — HuggingFace ``datasets``
                audio-column layout.

        Returns:
            torch.Tensor of shape (num_frames, num_mel_bins).
        """
        sample_rate = audio['sampling_rate']
        waveform = torch.tensor(audio['array'])  # expects a 1-D sample array

        if self.padding > 0:
            # Bug fix: the original called ``F.pad`` but ``F`` was never
            # imported, so any non-zero padding raised NameError.
            waveform = torch.nn.functional.pad(waveform, (0, self.padding))
        window = torch.hann_window(self.n_fft)
        stft = torch.stft(waveform,
                          self.n_fft,
                          self.hop_length,
                          window=window,
                          return_complex=True)
        # Drop the final frame to match Whisper's reference implementation.
        magnitudes = stft[..., :-1].abs() ** 2

        filters = torch.from_numpy(
            librosa.filters.mel(sr=sample_rate,
                                n_fft=self.n_fft,
                                n_mels=self.num_mel_bins))
        mel_spec = filters @ magnitudes.float()

        # NOTE(xcsong): https://github.com/openai/whisper/discussions/269
        log_spec = torch.clamp(mel_spec, min=1e-10).log10()
        log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        # Transpose (mel, frames) -> (frames, mel) for the downstream model.
        return log_spec.transpose(0, 1)


class OsumAssistant(VoiceAssistant):
    """Voice assistant backed by the OSUM speech-LLM inference pipeline.

    Loads a fixed checkpoint/config pair at construction time and answers
    spoken input as text via ``generate_audio``. Runs on Ascend NPU
    devices (``torch_npu``).
    """

    def __init__(self):
        # Fixed inference configuration: model config YAML, trained
        # checkpoint, and nucleus-sampling decode parameters.
        configs = argparse.Namespace(
            config="/mnt/sfs/asr/code/osum_xlgeng/examples/wenetspeech/whisper/conf/config_llm_huawei_instruct-version_cosyvoice1-token.yaml",
            checkpoint="/mnt/sfs/asr/ckpt/qwen2_multi_task_4_6gpus_gxl_adapter/epoch_30_LLMinstruct_cosyvoice1_10Wtts_1WenTTS_2Khqtts_1KenS2S_3Ks2s_5Ws2t/step_14999.pt",
            top_k=20,
            top_p=0.8,
            temperature=0.8,
        )
        self.pipeline = inferencePipeline(configs)
        self.audio_processor = audioProcessor()

    def generate_audio(
        self,
        audio,
        gpu_id,
    ):
        """Generate a text reply for one audio input.

        Args:
            audio: dict with ``'array'`` and ``'sampling_rate'`` keys, as
                consumed by ``audioProcessor.get_feature``.
            gpu_id: index of the NPU device to run inference on.

        Returns:
            The model's response on success, or ``""`` if inference failed
            (the error is logged with its traceback).
        """
        try:
            device = torch.device(f'npu:{gpu_id}')
            # (frames, mels) features; batch dim added below for the model.
            feature = self.audio_processor.get_feature(audio)
            # Prompt instructs the model to answer the speech directly in
            # text form (no intermediate transcription).
            prompt = "根据语音输入，直接以文字形式进行回答或对话。"
            response = self.pipeline.model.generate(
                wavs=feature.unsqueeze(0).to(device),
                wavs_len=torch.tensor([feature.size(0)]).to(device),
                prompt=prompt,
            )
            return response
        except Exception:
            # Best-effort boundary: keep the evaluation loop alive on any
            # failure. Use ``exception`` (not ``error``) so the full
            # traceback is preserved in the log.
            logger.exception("OSUM generate_audio failed")
            return ""
