# import base64
import json
import time
# import gradio as gr
import sys

# --- 原始导入和路径设置 ---
sys.path.insert(0, '../../../../')
sys.path.insert(0, '.')
sys.path.insert(0, '../../tts/third_party/Matcha-TTS')
sys.path.append('../../tts')

# from cosyvoice.utils.file_utils import load_wav
# from patches import modelling_qwen2_infer_patch  # 打patch
# from cosyvoice_util import token_list2wav, token_list2wav_wsy
from gxl_ai_utils.utils import utils_file

try:
    from wenet.utils.init_tokenizer import init_tokenizer
    from gxl_ai_utils.config.gxl_config import GxlNode
    from wenet.utils.init_model import init_model
    import logging
    import librosa
    import torch
    import torchaudio
except ImportError:
    pass

# Optional Ascend NPU support: torch_npu is probed at import time; when it is
# missing we silently fall back to CUDA/CPU below.
is_npu = True
try:
    import torch_npu
except ImportError:
    is_npu = False
    print("torch_npu is not available. if you want to use npu, please install it.")




# Global inference device: NPU when torch_npu imported successfully,
# otherwise CUDA if available, otherwise CPU.
device = torch.device(f'npu') if is_npu else torch.device('cuda' if torch.cuda.is_available() else 'cpu')



def load_model_and_tokenizer(checkpoint_path, config_path):
    """Load a checkpoint and its tokenizer, ready for inference.

    Args:
        checkpoint_path (str): path to the model weight file.
        config_path (str): path to the YAML model configuration.

    Returns:
        tuple: ``(model, tokenizer)`` — the model is moved to the global
        ``device``, cast to bfloat16 and switched to eval mode.
    """
    print(f"正在从以下路径加载模型: {checkpoint_path}")
    cli_args = GxlNode({"checkpoint": checkpoint_path})
    cfg = utils_file.load_dict_from_yaml(config_path)
    model, cfg = init_model(cli_args, cfg)
    # Cast to bfloat16 on the target device; inference-only, so eval mode.
    model = model.to(device).to(torch.bfloat16)
    model.eval()
    tokenizer = init_tokenizer(cfg)
    print(f"模型 {checkpoint_path} 加载完成并移动到 {device}")
    return model, tokenizer



# Checkpoint and config for model A. NOTE(review): the checkpoint is an
# absolute cluster path — adjust for other environments before running.
CHECKPOINT_PATH_A="/mnt/sfs/asr/ckpt/qwen2_multi_task_4_6gpus_gxl_adapter/full_train_new_pattern_from_epoch9_now_epoch10_change_rate_full_tag_pattern_add_qwenomni_sanjianke_add_mmau_en2zh/step_21249.pt"
CONFIG_PATH_A = "../conf/config_llm_huawei_instruct_3B_cosyvoice1-token.yaml"

# Model A is loaded eagerly at import time (heavy side effect).
print("开始加载模型 A...")
model_a, tokenizer_a = load_model_and_tokenizer(CHECKPOINT_PATH_A, CONFIG_PATH_A)


def do_resample(input_wav_path, output_wav_path):
    """Convert an audio file to 16 kHz mono and write it to output_wav_path."""
    audio, src_rate = torchaudio.load(input_wav_path)
    # Down-mix multi-channel audio by averaging across channels.
    if audio.shape[0] > 1:
        audio = audio.mean(dim=0, keepdim=True)
    audio = torchaudio.transforms.Resample(orig_freq=src_rate, new_freq=16000)(audio)
    utils_file.makedir_for_file(output_wav_path)
    torchaudio.save(output_wav_path, audio, 16000)


def get_feat_from_wav_path(input_wav_path):
    """Compute an 80-bin log-mel feature tensor for one wav file.

    The audio is first resampled to 16 kHz mono via a temporary file, then a
    log-mel spectrogram is extracted (400-sample window, 160-sample hop,
    80 mel bins — matches the Whisper-style recipe).

    Args:
        input_wav_path (str): path to the source audio file.

    Returns:
        tuple: ``(feat, feat_lens)`` where ``feat`` is a (1, T, 80) tensor on
        the global ``device`` and ``feat_lens`` a (1,) int64 tensor holding T.
    """
    import os
    timestamp_ms = int(time.time() * 1000)
    # BUG FIX: '~' is NOT expanded by file APIs — without expanduser() this
    # created a literal './~' directory instead of using the user cache dir.
    now_file_tmp_path_resample = os.path.expanduser(
        f'~/.cache/.temp/{timestamp_ms}_resample.wav')
    do_resample(input_wav_path, now_file_tmp_path_resample)
    try:
        waveform, sample_rate = torchaudio.load(now_file_tmp_path_resample)
    finally:
        # Remove the temp file so repeated calls do not accumulate files.
        try:
            os.remove(now_file_tmp_path_resample)
        except OSError:
            pass
    waveform = waveform.squeeze(0)
    window = torch.hann_window(400)
    stft = torch.stft(waveform, 400, 160, window=window, return_complex=True)
    # Drop the last STFT frame, then power spectrum.
    magnitudes = stft[..., :-1].abs() ** 2
    filters = torch.from_numpy(librosa.filters.mel(sr=sample_rate, n_fft=400, n_mels=80))
    mel_spec = filters @ magnitudes
    # Log compression with dynamic-range clipping and normalization.
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    feat = log_spec.transpose(0, 1)
    feat_lens = torch.tensor([feat.shape[0]], dtype=torch.int64).to(device)
    feat = feat.unsqueeze(0).to(device)
    return feat, feat_lens


def do_s2t(model, input_wav_path, input_prompt, profile=False):
    """Speech-to-text: transcribe/answer for one wav with the given prompt."""
    model.eval()
    feat, feat_lens = get_feat_from_wav_path(input_wav_path)
    print(f'feat shape: {feat.shape}, feat_lens: {feat_lens}')
    # Synchronize around the timed region so NPU timing is accurate.
    if is_npu:
        torch_npu.npu.synchronize()
    tic = time.time()
    res_text = model.generate(
        wavs=feat, wavs_len=feat_lens, prompt=input_prompt,
        cache_implementation="static")[0]
    if is_npu:
        torch_npu.npu.synchronize()
    toc = time.time()
    print(f"S2T 推理消耗时间: {toc - tic:.2f} 秒")
    return res_text


def do_s2t4chat(model, input_wav_path, input_prompt, profile=False):
    """Chat-style speech-to-text: like do_s2t but via generate4chat()."""
    model.eval()
    feat, feat_lens = get_feat_from_wav_path(input_wav_path)
    # generate4chat expects bfloat16 features.
    feat = feat.to(torch.bfloat16)
    print(f'feat shape: {feat.shape}, feat_lens: {feat_lens}')
    if is_npu:
        torch_npu.npu.synchronize()
    tic = time.time()
    res_text = model.generate4chat(
        wavs=feat, wavs_len=feat_lens, prompt=input_prompt,
        cache_implementation="static")[0]
    if is_npu:
        torch_npu.npu.synchronize()
    toc = time.time()
    print(f"S2T4Chat 推理消耗时间: {toc - tic:.2f} 秒")
    return res_text


def do_t2s(model, input_prompt, text_for_tts, profile=False):
    """Text-to-speech-token: return the generated token list (EOS stripped)."""
    model.eval()
    if is_npu:
        torch_npu.npu.synchronize()
    tic = time.time()
    token_tensor = model.generate_tts(
        device=device, text=text_for_tts, prompt=input_prompt)[0]
    # Drop the trailing token (end-of-sequence marker — presumably; verify).
    res_text = token_tensor.tolist()[:-1]
    if is_npu:
        torch_npu.npu.synchronize()
    toc = time.time()
    print(f"T2S 推理消耗时间: {toc - tic:.2f} 秒")
    return res_text


def do_t2t(model, question_txt, profile=False):
    """Text-to-text: answer a plain text question with the model."""
    model.eval()
    if is_npu:
        torch_npu.npu.synchronize()
    tic = time.time()
    print(f'开始t2t推理, question_txt: {question_txt}')
    res_text = model.generate_text2text(device=device, text=question_txt)[0]
    if is_npu:
        torch_npu.npu.synchronize()
    toc = time.time()
    print(f"T2T 推理消耗时间: {toc - tic:.2f} 秒")
    return res_text


def do_s2s(model, input_wav_path, input_prompt, profile=False):
    """Speech-to-speech: return 'answer_text|speech_token_list' as one string."""
    model.eval()
    feat, feat_lens = get_feat_from_wav_path(input_wav_path)
    print(f'feat shape: {feat.shape}, feat_lens: {feat_lens}')
    if is_npu:
        torch_npu.npu.synchronize()
    tic = time.time()
    output_text, text_res, speech_res = model.generate_s2s_no_stream(
        wavs=feat, wavs_len=feat_lens, prompt=input_prompt)
    if is_npu:
        torch_npu.npu.synchronize()
    toc = time.time()
    print(f"S2S 推理消耗时间: {toc - tic:.2f} 秒")
    # First speech token is skipped (presumably a BOS marker — verify).
    return f'{output_text[0]}|{str(speech_res[0].tolist()[1:])}'

def do_s2s_think(model, input_wav_path, input_prompt, profile=False):
    """Speech-to-speech with thinking; same output format as do_s2s."""
    model.eval()
    feat, feat_lens = get_feat_from_wav_path(input_wav_path)
    print(f'feat shape: {feat.shape}, feat_lens: {feat_lens}')
    if is_npu:
        torch_npu.npu.synchronize()
    tic = time.time()
    output_text, text_res, speech_res = model.generate_s2s_no_stream_think(
        wavs=feat, wavs_len=feat_lens, prompt=input_prompt)
    if is_npu:
        torch_npu.npu.synchronize()
    toc = time.time()
    # NOTE(review): log label says "S2S" although this is the think variant.
    print(f"S2S 推理消耗时间: {toc - tic:.2f} 秒")
    return f'{output_text[0]}|{str(speech_res[0].tolist()[1:])}'

def true_decode_fuc(model, tokenizer, input_wav_path, input_prompt):
    """Dispatch one request to the task-specific inference routine.

    Task selection from the prompt:
      * suffix ``_TTS``          -> text-to-speech-token (audio optional)
      * suffix ``_self_prompt``  -> S2T with caller-supplied prompt
      * suffix ``_T2T``          -> text-to-text (audio optional)
      * known S2S prompts        -> speech-to-speech
      * ``THINK``                -> speech-to-speech with thinking
      * chat prompt              -> chat-style S2T
      * anything else            -> plain S2T with age-tag normalization

    Args:
        model: loaded inference model.
        tokenizer: loaded tokenizer (kept for interface compatibility; unused).
        input_wav_path (str|None): audio path; required except for TTS/T2T.
        input_prompt (str): task prompt, optionally carrying a task suffix.

    Returns:
        str: task result text, or an error message when audio is missing.
    """
    print(f"wav_path: {input_wav_path}, prompt:{input_prompt}")
    if input_wav_path is None and not input_prompt.endswith(("_TTS", "_T2T")):
        print("音频信息未输入，且不是T2S或T2T任务")
        return "错误：需要音频输入"

    if input_prompt.endswith("_TTS"):
        # BUG FIX: strip only the trailing marker — replace() deleted every
        # occurrence of "_TTS", corrupting payload text containing it.
        text_for_tts = input_prompt[:-len("_TTS")]
        prompt = "恳请将如下文本转换为其对应的语音token，力求生成最为流畅、自然的语音。"
        res_text = do_t2s(model, prompt, text_for_tts)
    elif input_prompt.endswith("_self_prompt"):
        prompt = input_prompt[:-len("_self_prompt")]
        res_text = do_s2t(model, input_wav_path, prompt)
    elif input_prompt.endswith("_T2T"):
        question_txt = input_prompt[:-len("_T2T")]
        res_text = do_t2t(model, question_txt)
    elif input_prompt in ["识别语音内容，并以文字方式作出回答。",
                          "请推断对这段语音回答时的情感，标注情感类型，撰写流畅自然的聊天回复，并生成情感语音token。",
                          "将下列语音进行理解并生成相应回答，之后请将回答文本转为语音token。"]:
        res_text = do_s2s(model, input_wav_path, input_prompt)
    elif input_prompt == "THINK":
        res_text = do_s2s_think(model, input_wav_path, input_prompt)
    elif input_prompt == "用户通过语音与你交互，请作出书面回应。":
        res_text = do_s2t4chat(model, input_wav_path, input_prompt)
    else:
        res_text = do_s2t(model, input_wav_path, input_prompt)
        # Collapse fine-grained age tags into a single <adult> tag.
        res_text = (res_text.replace("<youth>", "<adult>")
                            .replace("<middle_age>", "<adult>")
                            .replace("<middle>", "<adult>"))

    print("识别结果为：", res_text)
    return res_text


def do_decode(model, tokenizer, input_wav_path, input_prompt):
    """Thin logging wrapper around true_decode_fuc()."""
    print(f'使用模型进行推理: input_wav_path={input_wav_path}, input_prompt={input_prompt}')
    return true_decode_fuc(model, tokenizer, input_wav_path, input_prompt)


def save_to_jsonl(if_correct, wav, prompt, res, path="results.jsonl"):
    """Append one evaluation record as a JSON line.

    Args:
        if_correct: whether the result was judged correct.
        wav: audio file path of the sample.
        prompt: task prompt used for inference.
        res: model output text.
        path (str): output JSONL file (generalized; defaults to the original
            hard-coded "results.jsonl" for backward compatibility).
    """
    record = {
        "if_correct": if_correct,
        "wav": wav,
        "task": prompt,
        "res": res
    }
    # ensure_ascii=False keeps Chinese text human-readable in the file.
    with open(path, "a", encoding="utf-8") as f:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")


def download_audio(input_wav_path):
    """Return the wav path for download, or None when no path was given."""
    if not input_wav_path:
        return None
    return input_wav_path


# def get_wav_from_token_list(input_list, prompt_speech):
#     return token_list2wav_wsy(input_list, prompt_speech)

# Smoke test: run the chat-style S2T task on a fixed sample wav with model A.
if __name__ == '__main__':
    input_wav_path = "/mnt/sfs/asr/test_data/test_sets_format_3000/FLUB_n_test_s2t_by_znlin_added_by_20250424/82bf577b3a3d348e277d2ccb8f31e12ab7bada96.wav"
    res =  do_s2t4chat(model_a, input_wav_path, "none")
