import base64
import json
import time

import argparse
# import gradio as gr
import os 

import sys
from pathlib import Path
import re
# sys.path.insert(0, '/mnt/sfs/asr/code/osum_xlgeng/examples/wenetspeech/tts')
# sys.path.insert(0, '/mnt/sfs/asr/code/osum_xlgeng/examples/wenetspeech/whisper')
# sys.path.insert(0, '/mnt/sfs/asr/code/osum_xlgeng_3B/examples/wenetspeech/tts/third_party/Matcha-TTS')
sys.path.insert(0, '../..')
sys.path.insert(0, '../../tts/third_party/Matcha-TTS')
sys.path.insert(0, '..')
from gxl_ai_utils.utils import utils_file
from wenet.utils.init_tokenizer import init_tokenizer
from gxl_ai_utils.config.gxl_config import GxlNode
from wenet.utils.init_model import init_model
import librosa
import torch
import torchaudio

# Whether to run on an Ascend NPU; flipped to False below if torch_npu is missing.
is_npu = True

try:
    import torch_npu
except ImportError:
    # Fall back to CUDA/CPU device selection when the NPU extension is absent.
    is_npu = False
    print("torch_npu is not available. if you want to use npu, please install it.")


# gpu_id = 0

def get_args():
    """Parse the command-line arguments for the recognition script.

    Returns:
        argparse.Namespace with ``config``, ``test_data``, ``gpu_id``,
        ``checkpoint`` and ``result_dir`` attributes.
    """
    parser = argparse.ArgumentParser(description='recognize with your model')
    for flag, help_msg in (('--config', 'config file'),
                           ('--test_data', 'test data file')):
        parser.add_argument(flag, required=True, help=help_msg)
    parser.add_argument('--gpu_id',
                        type=int,
                        default=-1,
                        help='gpu id for this rank, -1 for cpu')
    for flag, help_msg in (('--checkpoint', 'checkpoint model'),
                           ('--result_dir', 'asr result file')):
        parser.add_argument(flag, required=True, help=help_msg)

    parsed = parser.parse_args()
    print(parsed)
    return parsed

def main():
    """Run non-streaming speech-to-speech inference over a wav list.

    Reads ``args.test_data`` lines of the form ``<key> <wav_path>``, extracts
    a Whisper-style 80-dim log-mel feature for each utterance, runs the
    model's ``generate_s2s``, and converts the returned speech-token list to
    ``output_<key>.wav`` under ``args.result_dir``.
    """
    args = get_args()
    configs = utils_file.load_dict_from_yaml(args.config)
    model, configs = init_model(args, configs)
    if is_npu:
        device = torch.device(f'npu:{args.gpu_id}')
    else:
        device = torch.device(f'cuda:{args.gpu_id}')
    model = model.to(device)
    tokenizer = init_tokenizer(configs)
    print(model)
    with open(args.test_data, "r") as f:
        for line in f:
            key, input_wav_path = line.strip().split()
            output_wav_path = os.path.join(args.result_dir, f"output_{key}.wav")
            # Resample to 16 kHz mono into a unique temp file.
            # BUGFIX: expanduser is required -- torchaudio does not expand
            # '~', so the original code created a literal './~' directory.
            timestamp_ms = int(time.time() * 1000)
            tmp_resampled = os.path.expanduser(
                f'~/.cache/.temp/{timestamp_ms}_resample.wav')
            do_resample(input_wav_path, tmp_resampled)
            waveform, sample_rate = torchaudio.load(tmp_resampled)
            waveform = waveform.squeeze(0)  # (channel=1, sample) -> (sample,)
            # Whisper-style log-mel: 400-pt FFT, hop 160, Hann window, 80 mels.
            window = torch.hann_window(400)
            stft = torch.stft(waveform,
                              400,
                              160,
                              window=window,
                              return_complex=True)
            # Drop the last frame, following the Whisper feature convention.
            magnitudes = stft[..., :-1].abs() ** 2

            filters = torch.from_numpy(
                librosa.filters.mel(sr=sample_rate,
                                    n_fft=400,
                                    n_mels=80))
            mel_spec = filters @ magnitudes

            # NOTE(xcsong): https://github.com/openai/whisper/discussions/269
            log_spec = torch.clamp(mel_spec, min=1e-10).log10()
            log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
            log_spec = (log_spec + 4.0) / 4.0
            feat = log_spec.transpose(0, 1)  # (n_mels, T) -> (T, n_mels)
            feat_lens = torch.tensor([feat.shape[0]], dtype=torch.int64).to(device)
            feat = feat.unsqueeze(0).to(device)
            prompt = "先根据语音输入，直接以文字形式进行回答或对话，接着再生成语音token。"
            res_text = model.generate_s2s(wavs=feat, wavs_len=feat_lens, prompt=prompt)[0]
            # Model output looks like "<answer text>|<json speech-token list>".
            token_list_str = res_text.split('|')[-1]
            token_list = [int(i) for i in json.loads(token_list_str)]
            token_list2wav2(token_list, output_wav_path)


# Device index used by init_model_my() below (main() uses args.gpu_id instead).
gpu_id = 3
# import pdb;pdb.set_trace()
def init_model_my(checkpoint_path):
    """Load the 3B S2S model from *checkpoint_path* and move it to a device.

    The device is chosen from the module-level ``is_npu`` flag and ``gpu_id``
    index; the model config path is hard-coded for this checkpoint family.

    Returns:
        (model, tokenizer, device) tuple.
    """
    config_path = "/mnt/sfs/asr/code/osum_xlgeng_3B/examples/wenetspeech/whisper/conf/config_llm_huawei_instruct_3B_cosyvoice1-token.yaml"
    node_args = GxlNode({
        "checkpoint": checkpoint_path,
    })
    model_configs = utils_file.load_dict_from_yaml(config_path)
    model, model_configs = init_model(node_args, model_configs)
    device_kind = 'npu' if is_npu else 'cuda'
    device = torch.device(f'{device_kind}:{gpu_id}')
    model = model.to(device)
    tokenizer = init_tokenizer(model_configs)
    print(model)
    return model, tokenizer, device


# model, tokenizer, device = init_model_my()
# model.eval()
# model = model.to(model.llama_model.dtype)


def do_resample(input_wav_path, output_wav_path):
    """Convert an audio file to 16 kHz mono and save it as a wav.

    Args:
        input_wav_path: source audio path (any sample rate / channel count).
        output_wav_path: destination wav path; parent directories are created.
    """
    waveform, sample_rate = torchaudio.load(input_wav_path)
    num_channels = waveform.shape[0]
    # Down-mix multi-channel audio by averaging the channels.
    if num_channels > 1:
        waveform = torch.mean(waveform, dim=0, keepdim=True)
    # Skip the (identity) resampler when the rate already matches 16 kHz.
    if sample_rate != 16000:
        waveform = torchaudio.transforms.Resample(
            orig_freq=sample_rate, new_freq=16000)(waveform)
    utils_file.makedir_for_file(output_wav_path)
    torchaudio.save(output_wav_path, waveform, 16000)

def get_feat_from_wav_path(input_wav_path):
    """Extract a Whisper-style 80-dim log-mel feature from an audio file.

    The input is first resampled to 16 kHz mono into a temp file under
    ``~/.cache/.temp``.

    Args:
        input_wav_path: str, path to the input audio file.

    Returns:
        feat: tensor, shape=(1, T, 80), moved to the module-level ``device``.
        feat_lens: int64 tensor, shape=(1,), holding T.
    """
    # BUGFIX: expanduser is required -- torchaudio does not expand '~', so
    # the original code created a literal './~' directory in the working dir.
    timestamp_ms = int(time.time() * 1000)
    tmp_resampled = os.path.expanduser(
        f'~/.cache/.temp/{timestamp_ms}_resample.wav')
    do_resample(input_wav_path, tmp_resampled)
    waveform, sample_rate = torchaudio.load(tmp_resampled)
    waveform = waveform.squeeze(0)  # (channel=1, sample) -> (sample,)
    # Whisper-style STFT: 400-pt FFT, hop 160, Hann window.
    window = torch.hann_window(400)
    stft = torch.stft(waveform,
                      400,
                      160,
                      window=window,
                      return_complex=True)
    # Drop the last frame, following the Whisper feature convention.
    magnitudes = stft[..., :-1].abs() ** 2

    filters = torch.from_numpy(
        librosa.filters.mel(sr=sample_rate,
                            n_fft=400,
                            n_mels=80))
    mel_spec = filters @ magnitudes

    # NOTE(xcsong): https://github.com/openai/whisper/discussions/269
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    feat = log_spec.transpose(0, 1)  # (n_mels, T) -> (T, n_mels)
    # NOTE(review): relies on the module-level ``device`` global being set
    # (it is assigned in the __main__ section) -- confirm before reuse.
    feat_lens = torch.tensor([feat.shape[0]], dtype=torch.int64).to(device)
    feat = feat.unsqueeze(0).to(device)
    return feat, feat_lens


def infer_tts(input_text):
    """Synthesize speech tokens for *input_text* via the global model."""
    tts_prompt = "恳请将如下文本转换为其对应的语音token，力求生成最为流畅、自然的语音。"
    outputs = model.generate_tts(device=device, text=input_text, prompt=tts_prompt)
    return outputs[0]
# from cosyvoice_util import token_list2wav2
def infer_s2s(input_wav_path, output_wav_path):
    """Non-streaming speech-to-speech: text answer, then speech tokens.

    Writes the synthesized wav to *output_wav_path* and returns the raw
    model output string ("<answer text>|<json token list>").
    """
    from cosyvoice_util import token_list2wav2
    s2s_prompt = "先根据语音输入，直接以文字形式进行回答或对话，接着再生成语音token。"
    feat, feat_lens = get_feat_from_wav_path(input_wav_path)
    raw_output = model.generate_s2s(wavs=feat, wavs_len=feat_lens, prompt=s2s_prompt)[0]
    # Everything after the final '|' is a JSON-encoded speech-token list.
    tokens = [int(t) for t in json.loads(raw_output.split('|')[-1])]
    token_list2wav2(tokens, output_wav_path)
    return raw_output

def infer_s2t(input_wav_path):
    """Speech-to-text: transcribe the spoken question and answer in text."""
    s2t_prompt = "识别用户的语音提问并做出文字回答。"
    feat, feat_lens = get_feat_from_wav_path(input_wav_path)
    return model.generate(wavs=feat, wavs_len=feat_lens, prompt=s2t_prompt)[0]

def infer_s2s_streaming(input_wav_path):
    """Streaming speech-to-speech: interleaved text / speech-token reply."""
    streaming_prompt = "实现语音与文字的即时交流，直接进行文字和语音token的交错回复，每回复6个文字,就转化为18个语音token。"
    feat, feat_lens = get_feat_from_wav_path(input_wav_path)
    # The streaming path is fed bf16 features and int32 lengths.
    feat = feat.to(torch.bfloat16)
    feat_lens = feat_lens.to(torch.int32)
    return model.generate_s2s_streaming(
        wavs=feat, wavs_len=feat_lens, prompt=streaming_prompt)[0]

from cosyvoice_util import token_list2wav2
if __name__=="__main__":

    output_ckpt_mix_lora_path="/mnt/sfs/asr/ckpt/qwen2_multi_task_4_6gpus_gxl_adapter/full_train_llm_3B_epoch_4/step_2499.pt"
    # output_ckpt_mix_lora_path="/home/work_nfs16/asr_data/ckpt/understand_model_3B/full_train_llm_3B_epoch_3/step_22499.pt"
    model, tokenizer, device = init_model_my(output_ckpt_mix_lora_path)
    ckpt_path = Path(output_ckpt_mix_lora_path)
    step_match = re.search(r"step_(\d+)\.pt", ckpt_path.name)
    if step_match is None:
        raise ValueError(f"无法从模型路径中提取 step，例如 step_2499.pt：{ckpt_path}")
    step_id = step_match.group(1)  # 如 '2499'

    # ==== 构造输出目录 ====
    ckpt_dir = ckpt_path.parent  # 即 .../full_train_llm_3B_epoch_4/
    output_dir = ckpt_dir / f"test_step_{step_id}.pt" / "chat_wav"
    output_dir.mkdir(parents=True, exist_ok=True)

    model.eval()
    data_list_path = "/mnt/sfs/asr/test_data/test_sets_format_3000/aslp_chat_test/data.list"
    streaming_txt_path = output_dir / "streaming_result.txt"
    nonstreaming_txt_path = output_dir / "non_streaming_result.txt"

    streaming_txt_f = open(streaming_txt_path, "w", encoding="utf-8")
    nonstreaming_txt_f = open(nonstreaming_txt_path, "w", encoding="utf-8")

    with open(data_list_path, 'r', encoding='utf-8') as f:
        for line in f:
            data = json.loads(line.strip())
            input_wav = data["wav"]
            utt_id = data["key"]
            txt = data['txt']
            print(f"Processing {utt_id}...")

            feat, feat_lens = get_feat_from_wav_path(input_wav)
            feat = feat.to(torch.bfloat16).to(device)
            feat_lens = feat_lens.to(torch.int32).to(device)

            # === 流式推理 ===
            prompt_streaming = "直接将用户语音转文字，并在每6字后加18个语音token交替应答。"
            res_text_streaming = model.generate_s2s_streaming(
                wavs=feat, wavs_len=feat_lens, prompt=prompt_streaming
            )[0]

            token_list_str = res_text_streaming.split('|')[-1]
            token_list = json.loads(token_list_str)
            token_list = [int(i) for i in token_list]

            # 保存流式语音和文本
            token_list2wav2(token_list, str(output_dir / f"{utt_id}_streaming.wav"))
            res_text_streaming_main = res_text_streaming.split('|')[0].strip()
            streaming_txt_f.write(f"{utt_id} {data['txt']} | {res_text_streaming_main}\n")

            # === 非流式推理 ===
            prompt_nonstreaming = "先根据语音输入，直接以文字形式进行回答或对话，接着再生成语音token。"
            res_text_nonstreaming = model.generate_s2s(
                wavs=feat, wavs_len=feat_lens, prompt=prompt_nonstreaming
            )[0]

            token_list_str = res_text_nonstreaming.split('|')[-1]
            token_list = json.loads(token_list_str)
            token_list = [int(i) for i in token_list]

            token_list2wav2(token_list, str(output_dir / f"{utt_id}_nonstreaming.wav"))
            res_text_nonstreaming_main = res_text_nonstreaming.split('|')[0].strip()
            nonstreaming_txt_f.write(f"{utt_id} {data['txt']} | {res_text_nonstreaming_main}\n")
            print("非流式结果:", res_text_nonstreaming)
    streaming_txt_f.close()
    nonstreaming_txt_f.close()




