from src.models.base import VoiceAssistant
import argparse
import os
import torch
import math
from loguru import logger
import torchaudio.compliance.kaldi as k
import librosa
import torchaudio
from gxl_ai_utils.config.gxl_config import GxlNode
from gxl_ai_utils.utils import utils_file
from huggingface_hub import snapshot_download

# from src.models.src_osum.llm_asr.pipeline import inferencePipeline
import sys
sys.path.append("/mnt/sfs/asr/code/osum_xlgeng")
from wenet.utils.init_model import init_model


class audioProcessor:
    """Whisper-style log-mel spectrogram frontend.

    Uses a 400-point FFT with a 160-sample hop (10 ms at 16 kHz) and
    80 mel bins, matching OpenAI Whisper's feature extractor.
    """

    def __init__(self, chunk_size=16):
        # NOTE(review): chunk_size is accepted for interface compatibility
        # but is currently unused by this class.
        self.n_fft = 400
        self.hop_length = 160
        self.num_mel_bins = 80
        # Extra right-padding in samples appended before the STFT
        # (0 disables padding).
        self.padding = 0

    def get_feature(self, audio):
        """Compute a log-mel spectrogram for a single utterance.

        Args:
            audio: dict with keys ``'array'`` (1-D waveform samples) and
                ``'sampling_rate'`` (sample rate in Hz).

        Returns:
            torch.Tensor of shape (num_frames, num_mel_bins).
        """
        sample_rate = audio['sampling_rate']
        waveform = torch.tensor(audio['array'])
        if self.padding > 0:
            # Fix: the original called undefined `F.pad` (NameError once
            # padding > 0); use the fully qualified function instead.
            waveform = torch.nn.functional.pad(waveform, (0, self.padding))
        window = torch.hann_window(self.n_fft)
        stft = torch.stft(waveform,
                          self.n_fft,
                          self.hop_length,
                          window=window,
                          return_complex=True)
        # Drop the final frame, as Whisper's frontend does.
        magnitudes = stft[..., :-1].abs() ** 2

        filters = torch.from_numpy(
            librosa.filters.mel(sr=sample_rate,
                                n_fft=self.n_fft,
                                n_mels=self.num_mel_bins))
        mel_spec = filters @ magnitudes.float()

        # NOTE(xcsong): https://github.com/openai/whisper/discussions/269
        # Clamp, take log10, floor at (max - 8), then rescale to roughly
        # [-1, 1] — Whisper's standard normalization.
        log_spec = torch.clamp(mel_spec, min=1e-10).log10()
        log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec.transpose(0, 1)


class OsumAssistant(VoiceAssistant):
    """Voice assistant backed by the OSUM speech LLM.

    Loads a wenet-initialized model from hard-coded checkpoint/config
    paths and exposes single-utterance generation plus batch evaluation
    over a JSON manifest.
    """

    def __init__(self):
        # NOTE(review): paths are hard-coded to a specific cluster mount —
        # confirm they exist in the deployment environment.
        checkpoint_path = "/mnt/sfs/asr/code/osum_xlgeng/examples/wenetspeech/whisper/exp/epoch_29_LLMinstruct_cosyvoice1_10Wtts_2Khqtts_3Ks2s_5Ws2t/step_57499.pt"
        config_path = "/mnt/sfs/asr/code/osum_xlgeng/examples/wenetspeech/whisper/conf/config_llm_huawei_instruct-version_cosyvoice1-token.yaml"
        args = GxlNode({
            "checkpoint_path": checkpoint_path,
        })
        configs = utils_file.load_dict_from_yaml(config_path)
        self.model, self.configs = init_model(args, configs)
        self.audio_processor = audioProcessor()
        # Set on first generate_audio call once the model has been moved
        # to the accelerator; avoids re-running .to(device) per request.
        self._model_on_device = False

    def generate_audio(
        self,
        prompt,
        audio,
        max_new_tokens=2048,
    ):
        """Generate a model response for one audio clip.

        Args:
            prompt: text instruction passed alongside the audio.
            audio: dict with keys ``'array'`` and ``'sampling_rate'``
                (the shape audioProcessor.get_feature expects).
            max_new_tokens: kept for interface compatibility; currently
                not forwarded to the model.

        Returns:
            The model's response, or ``""`` if generation failed.
        """
        try:
            # Device is created lazily here (not in __init__) so that
            # constructing the assistant never requires the NPU runtime.
            device = torch.device('npu:0')
            feature = self.audio_processor.get_feature(audio)
            # Fix: move the model to the device only once instead of on
            # every call.
            if not self._model_on_device:
                self.model.to(device)
                self._model_on_device = True
            response = self.model.generate(
                wavs=feature.unsqueeze(0).to(device),
                wavs_len=torch.tensor([feature.size(0)]).to(device),
                prompt=prompt,
            )
            logger.debug("result: {}", response)
            return response
        except Exception as e:
            # Best-effort API: log the full traceback, return empty text.
            logger.exception(e)
            return ""

    def process_wav_scp(self, input_json_path, output_jsonl_path):
        """Run generation over a JSON manifest and write JSONL results.

        Args:
            input_json_path: path to a JSON file containing a list of
                items with keys ``'audio_id'``, ``'question'``,
                ``'choices'``, ``'answer'`` (and optional metadata).
            output_jsonl_path: path of the JSONL results file; one JSON
                object is written per successfully processed item.
        """
        import json
        from tqdm import tqdm

        with open(input_json_path, 'r', encoding='utf-8') as f_in, \
             open(output_jsonl_path, 'w', encoding='utf-8') as f_out:
            data_list = json.load(f_in)
            for item in tqdm(data_list):
                try:
                    # Load as 16 kHz mono — the format get_feature expects.
                    # (Uses the module-level librosa import; the original's
                    # redundant local import was removed.)
                    audio, sr = librosa.load(item['audio_id'], mono=True, sr=16000)
                    audio_dict = {
                        'array': audio,
                        'sampling_rate': sr
                    }
                    choices_str = ", ".join(item.get("choices", []))
                    prompt = f"{item.get('question', '')}\nchoices: {choices_str}"
                    result = self.generate_audio(prompt, audio_dict)
                    output_item = {
                        "answer": item.get("answer", ""),
                        "model_output": result,
                        "task": item.get("task", ""),
                        "difficulty": item.get("difficulty", ""),
                        "choices": item.get("choices", []),
                        "sub-category": item.get("sub-category", "")
                    }
                    json.dump(output_item, f_out, ensure_ascii=False)
                    f_out.write('\n')
                except Exception as e:
                    # Best-effort batch loop: log the failure and continue
                    # with the next item.
                    logger.error(f"处理音频{item.get('id', '')}失败: {str(e)}")

if __name__ == "__main__":
    # Fix: use the already-imported (previously unused) argparse so the
    # manifest/output paths can be overridden from the CLI; the defaults
    # preserve the original hard-coded behavior exactly.
    parser = argparse.ArgumentParser(
        description="Run the OSUM assistant over a JSON audio manifest.")
    parser.add_argument(
        "--input_json",
        default="/mnt/sfs/asr/code/osum_xlgeng/voicebench_wsy/mmau/mmau-test-mini.json",
        help="Path to the input JSON manifest.")
    parser.add_argument(
        "--output_jsonl",
        default="./test-mini-audios.jsonl",
        help="Path of the JSONL results file to write.")
    cli_args = parser.parse_args()
    assistant = OsumAssistant()
    assistant.process_wav_scp(cli_args.input_json, cli_args.output_jsonl)