from transformers import pipeline
import numpy as np
from wakeup_and_vad import WakeupAndVAD
from accelerate import infer_auto_device_map, init_empty_weights
from transformers.pipelines.audio_utils import ffmpeg_read
import yaml
import librosa

import requests
import json
import time
import gradio as gr
import os
import sys
import torch
import torchaudio
from torch import dtype
import threading
from datetime import datetime

# sys.path.insert(0, '../../../../')
from gxl_ai_utils.utils import utils_file
from gxl_ai_utils.config.gxl_config import GxlNode
from wenet_interupt.utils.init_model import init_model
import logging
import collections
import queue


# Maps internal task tags to the Chinese instruction prompt handed to the ASR
# model.  The values are runtime strings consumed by the model at inference
# time — they must stay exactly as trained/tuned, so do not edit them casually.
TASK_PROMPT_MAPPING = {
    "<TRANSCRIBE><BACKCHANNEL>": "请将这段音频进行转录，并在文本末尾附加<附和语句（backchannel）判断>标签。附和语句判断分为两种：backchannel，no_backchannel。",
    "<TRANSCRIBE><COMPLETE>": "请将这段音频进行转录，并在转录完成的文本末尾附加<语义完整性>标签。语义完整性类别分为两种：complete和incomplete。",
    "<TRANSCRIBE><SPEAKER>": "对如下音频进行语音识别，同时转录时需要在转录句子之后标注每一句话的发言者，说话人标签序号按出现顺序依次递增，诸如<speaker_1>、<speaker_2>等标签。如果出现两个说话人同时说话的情况，需要先转录先发言的说话人说的话。",
}

# CUDA device index used for both model placement and feature tensors.
gpu_id = 0

def init_model_my():
    """Load the interrupt-detection ASR model from its YAML config and
    checkpoint, move it to the configured CUDA device, and return it."""
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(message)s',
    )
    ckpt_path = "/home/work_nfs11/gjli/ckpt/wenet_undersdand_and_speech/interrupt_stage1_asr_backchannel_complete_0.5b_4.6/epoch_0.pt"
    cfg_path = "/home/work_nfs16/gjli/workspaces/wenet_speech_interrupt/examples/wenetspeech/whisper/conf/finetune_whisper_medium_gxl_adapter_interrupt_infer.yaml"
    # init_model() expects the checkpoint path wrapped in a GxlNode config.
    node = GxlNode({"checkpoint": ckpt_path})
    cfg = utils_file.load_dict_from_yaml(cfg_path)
    net, cfg = init_model(node, cfg)
    net = net.to(torch.device(f'cuda:{gpu_id}'))
    print(net)
    return net


model = init_model_my()


def do_resample(input_wav_path, output_wav_path, target_sr=16000):
    """Convert an audio file to mono at *target_sr* Hz and save it.

    Multi-channel input is down-mixed to a single channel by averaging
    before resampling.  The output directory is created if missing.

    Args:
        input_wav_path: path of the source audio file.
        output_wav_path: destination path for the converted wav.
        target_sr: output sample rate in Hz (default 16000, the rate the
            ASR front-end expects).
    """
    print(f'input_wav_path: {input_wav_path}, output_wav_path: {output_wav_path}')
    waveform, sample_rate = torchaudio.load(input_wav_path)
    # Down-mix multi-channel audio to mono by averaging the channels.
    num_channels = waveform.shape[0]
    if num_channels > 1:
        waveform = torch.mean(waveform, dim=0, keepdim=True)
    # Skip the (identity) resample when the file is already at target_sr.
    if sample_rate != target_sr:
        waveform = torchaudio.transforms.Resample(
            orig_freq=sample_rate, new_freq=target_sr)(waveform)
    utils_file.makedir_for_file(output_wav_path)
    torchaudio.save(output_wav_path, waveform, target_sr)


def true_decode_fuc(input_wav_path, input_prompt):
    """Transcribe *input_wav_path* with the global model using *input_prompt*.

    The file is first resampled to mono 16 kHz, then converted to a
    Whisper-style 80-bin log-mel spectrogram and decoded on the GPU.
    Returns the first decoded string from ``model.generate``.
    """
    # input_prompt = TASK_PROMPT_MAPPING.get(input_prompt, "未知任务类型")
    print(f"wav_path: {input_wav_path}, prompt:{input_prompt}")
    # Millisecond timestamp keeps the temp file name unique across calls.
    timestamp_ms = int(time.time() * 1000)
    now_file_tmp_path_resample = f'/home/work_nfs16/gjli/temp/{timestamp_ms}_resample.wav'
    do_resample(input_wav_path, now_file_tmp_path_resample)
    input_wav_path = now_file_tmp_path_resample
    waveform, sample_rate = torchaudio.load(input_wav_path)
    waveform = waveform.squeeze(0)  # (channel=1, sample) -> (sample,)
    print(f'wavform shape: {waveform.shape}, sample_rate: {sample_rate}')
    # 400-sample window / 160-sample hop = 25 ms / 10 ms at 16 kHz.
    window = torch.hann_window(400)
    stft = torch.stft(waveform,
                      400,
                      160,
                      window=window,
                      return_complex=True)
    # Power spectrum; the final frame is dropped (Whisper convention).
    magnitudes = stft[..., :-1].abs() ** 2

    # 80-bin mel filterbank applied to the power spectrum.
    filters = torch.from_numpy(
        librosa.filters.mel(sr=sample_rate,
                            n_fft=400,
                            n_mels=80))
    mel_spec = filters @ magnitudes

    # NOTE(xcsong): Mel spectrogram computation and audio pre-processing · openai/whisper · Discussion #269
    # Clamp + log10, compress dynamic range to 8 dB below the max, then
    # rescale to roughly [-1, 1] — the standard Whisper normalization.
    log_spec = torch.clamp(mel_spec, min=1e-10).log10()
    log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    feat = log_spec.transpose(0, 1)  # (n_mels, frames) -> (frames, n_mels)
    feat_lens = torch.tensor([feat.shape[0]], dtype=torch.int64).to(torch.device(f'cuda:{gpu_id}'))
    feat = feat.unsqueeze(0).to(torch.device(f'cuda:{gpu_id}'))  # add batch dim
    # feat = feat.half()
    # feat_lens = feat_lens.half()
    res_text = model.generate(wavs=feat, wavs_len=feat_lens, prompt=input_prompt)[0]
    print("输出结果：", res_text)
    return res_text


# init vad
# Silero-based VAD with wake-up support; cache_history=10 — presumably
# seconds of retained audio history, confirm against WakeupAndVAD docs.
vad_model = WakeupAndVAD("/home/work_nfs10/yzli/silero-vad/files", cache_history=10)
# Fixed number of samples the VAD consumes per predict() call.
chunk_size = vad_model.get_chunk_size()

# All incoming audio is normalized to this rate before VAD / ASR.
target_sample_rate = 16000

def response(pre_state, pre_txt, new_chunk):
    """Gradio streaming callback: feed one microphone chunk through the VAD.

    Audio accumulates in ``pre_state['collected_audio']`` and is drained in
    fixed-size chunks through ``vad_model.predict``.  When the VAD yields a
    finished utterance, it is stored in ``pre_state['output']`` and the
    hidden counter *pre_txt* is bumped so the UI triggers transcription.
    """
    # A pending (incomplete) utterance expires after 6 seconds: drop it,
    # reset the buffer, and surface an error message to the user.
    if 'pending_audio' in pre_state and time.time() - pre_state['pending_time'] > 6:
        del pre_state['pending_audio']
        del pre_state['pending_time']
        pre_state['collected_audio'] = torch.tensor([])
        pre_state['error'] = '超时，预存音频被清除，请重新提问'
        return pre_state, pre_txt

    sr, samples = new_chunk

    # int16 PCM -> float tensor in [-1, 1), shaped (1, n_samples).
    chunk_wave = torch.from_numpy(samples).unsqueeze(0) / (1 << 15)
    if sr != target_sample_rate:
        resampler = torchaudio.transforms.Resample(
            orig_freq=sr, new_freq=target_sample_rate)
        chunk_wave = resampler(chunk_wave)

    # Append the new chunk to whatever audio is already buffered.
    buffered = pre_state.get('collected_audio', torch.tensor([]))
    if buffered.numel() == 0:
        buffered = chunk_wave
    else:
        buffered = torch.cat((buffered, chunk_wave), dim=1)

    # Drain the buffer one fixed-size VAD chunk at a time.
    while buffered.shape[1] >= chunk_size:
        head = buffered[:, :chunk_size]
        verdict = vad_model.predict(head)
        buffered = buffered[:, chunk_size:]
        if verdict is not None and "cache_dialog" in verdict:
            pre_state['output'] = (target_sample_rate, verdict["cache_dialog"])
            try:
                pre_txt = int(pre_txt) + 1
            except ValueError:
                pre_txt = 0  # non-numeric counter: reset

    pre_state['collected_audio'] = buffered

    return pre_state, pre_txt

def _save_and_transcribe(audio_tensor, sample_rate, filename_prefix):
    """Save a (1, n) waveform to a uniquely named temp wav and run ASR on it.

    Returns the decoded text with the "<|endoftext|>" marker stripped.
    """
    output_dir = "/home/work_nfs16/gjli/temp"
    timestamp = int(time.time() * 1000)  # ms timestamp keeps names unique
    output_wav_path = os.path.join(output_dir, f"{filename_prefix}_{timestamp}.wav")
    torchaudio.save(output_wav_path, audio_tensor, sample_rate)
    prompt = TASK_PROMPT_MAPPING["<TRANSCRIBE><COMPLETE>"]
    result = true_decode_fuc(output_wav_path, prompt)
    return result.replace("<|endoftext|>", "")


def generate_response(state):
    """Triggered when the hidden counter changes: transcribe the utterance
    stored in ``state['output']`` and decide whether it is complete.

    An utterance tagged "<incomplete>" is parked in ``state['pending_audio']``
    and concatenated with the next utterance before being re-transcribed.
    Returns the status text shown in the UI.
    """
    # A pending error from the streaming callback takes precedence.
    if 'error' in state:
        return state.pop('error')

    # Resume case: glue the parked audio to the new utterance and retry.
    if 'pending_audio' in state:
        prev_audio = state['pending_audio']
        curr_sample_rate, curr_audio = state['output']
        combined_audio = torch.cat(
            [prev_audio.unsqueeze(0), curr_audio.unsqueeze(0)], dim=1)
        del state['pending_audio']
        del state['pending_time']
        result = _save_and_transcribe(combined_audio, curr_sample_rate,
                                      "combined_audio")
        if "<complete>" in result:
            return result + '|检测到完整内容，进行回答'
        return result + "|检测到不完整内容，请重新提问"

    # Normal case: transcribe the freshly finished utterance.
    # (local name avoids shadowing the module-level `target_sample_rate`)
    sample_rate, recording_wave = state['output']
    result = _save_and_transcribe(recording_wave.unsqueeze(0), sample_rate,
                                  "output_audio")
    if "<incomplete>" in result:
        # Park the audio and wait (up to 6 s, enforced in `response`) for more.
        state['pending_audio'] = recording_wave
        state['pending_time'] = time.time()
        return result + "|检测到话未说完，继续聆听..."
    if "<complete>" in result:
        result = result + '|检测到完整内容，进行回答'
    return result

def tts(response):
    """Synthesize *response* text via the local TTS HTTP service.

    Posts the text to the service on port 8000 and returns a
    ``(sample_rate, int16 numpy array)`` tuple for a Gradio audio output.
    The service is assumed to return 22.05 kHz float samples in [-1, 1]
    under the "audio" key — TODO confirm against the TTS server.
    """
    resp = requests.post("http://127.0.0.1:8000", json={"text": response})
    # Scale float samples back to the int16 PCM range.
    audio = np.array(resp.json()['audio']) * (1 << 15)
    return (22050, audio.astype(np.int16))

# Gradio UI: stream microphone audio through the VAD (`response`) and run
# ASR whenever the hidden counter changes (`generate_response`).
with gr.Blocks() as demo:
    with gr.Column():
        inp = gr.Audio(sources=["microphone"], streaming=True)
        # oup = gr.Audio(streaming=True, autoplay=True)
        ans_txt = gr.Textbox()  # visible transcription / status output
        # Hidden counter; `response` bumps it to trigger `generate_response`.
        txt = gr.Textbox(0, visible=False)
    # Per-session dict: collected/pending audio, latest utterance, error text.
    state = gr.State({})
    
    # Forward ~0.2 s microphone chunks into the VAD pipeline.
    inp.stream(response, [state, txt, inp], [state, txt], stream_every=0.2)
    txt.change(generate_response, state, ans_txt)
    # ans_txt.change(tts, ans_txt, oup)

demo.launch()