import os
import torch
import shlex
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import torchaudio
import subprocess
from my_util import Logger

# Module-level logger shared by every function in this file.
loger = Logger()

# Local directory holding the Chinese ASR model, mirrored from:
# https://hf-mirror.com/jonatasgrosman/wav2vec2-large-xlsr-53-chinese-zh-cn
CH_MODEL_ID = os.path.join(os.getcwd(), 'models', 'ch-voi-text')
# Local directory holding the English ASR model, mirrored from:
# https://hf-mirror.com/jonatasgrosman/wav2vec2-large-xlsr-53-english/tree/main
EN_MODEL_ID = os.path.join(os.getcwd(), 'models', 'en-voi-text')

def convert_webm_to_wav(input_file, output_file):
    """Convert a WebM audio file to a 16 kHz WAV file via ffmpeg.

    Any pre-existing output file is deleted first so ffmpeg does not stop
    to ask for overwrite confirmation. All failures are logged rather than
    raised, preserving the original best-effort contract (returns None).

    Args:
        input_file (str): path of the source WebM file.
        output_file (str): path of the WAV file to create.
    """
    loger.debug(f"input file {input_file}")
    loger.debug(f"output file {output_file}")

    # Remove a stale output file so the conversion starts clean.
    try:
        if os.path.exists(output_file):
            os.remove(output_file)
            loger.debug(f"file {output_file} remove success")
        else:
            loger.warning(f"file {output_file} not exist")
    except PermissionError:
        loger.error(f"cant remove file {output_file}, access denied")
    except Exception as e:
        loger.error(f"remove file {output_file} meet error: {e}")

    try:
        # Pass the command as an argument list: unlike the former
        # shlex.split(f-string) approach this is safe for paths containing
        # spaces and needs no manual backslash escaping on Windows.
        cmd = [
            'ffmpeg',
            '-i', input_file,   # input file
            '-ar', '16000',     # resample output audio to 16000 Hz
            output_file,        # output file name
        ]
        loger.debug(f"ffmpeg cmd {cmd}")

        # check=True makes a non-zero ffmpeg exit status raise
        # CalledProcessError; without it the handler below was dead code.
        result = subprocess.run(cmd, check=True)
        loger.debug(f"subprocess result {result}")
        loger.debug(f"Successfully converted {input_file} to {output_file}")
    except subprocess.CalledProcessError as e:
        loger.error(f"Error occurred: {e}")
    except OSError as e:
        # e.g. the ffmpeg binary is not installed / not on PATH; previously
        # this was silently swallowed by a `finally: return`.
        loger.error(f"failed to launch ffmpeg: {e}")


def init_voice_recognize_models():
    """Load the Chinese and English wav2vec2 speech-recognition models.

    Populates the module-level globals `process_ch2en`/`model_ch2en` and
    `process_en2ch`/`model_en2ch` from the local model directories.
    Failures are logged, not raised, so callers never see an exception.
    """
    global process_ch2en, model_ch2en
    global process_en2ch, model_en2ch

    try:
        # Chinese speech-recognition model
        process_ch2en = Wav2Vec2Processor.from_pretrained(CH_MODEL_ID)
        model_ch2en = Wav2Vec2ForCTC.from_pretrained(CH_MODEL_ID)

        # English speech-recognition model
        process_en2ch = Wav2Vec2Processor.from_pretrained(EN_MODEL_ID)
        model_en2ch = Wav2Vec2ForCTC.from_pretrained(EN_MODEL_ID)
    except Exception as e:
        # Broad catch is deliberate: initialization must never crash the app.
        loger.error(f"An error occurred: {e}")
    else:
        # Log success only when loading actually succeeded; the previous
        # `finally:` branch reported success even after a failure.
        loger.info("load voice recognize models success")



def convert_ch_voi2text(webm_voi):
    """Transcribe a Chinese WebM voice recording to text.

    The recording is converted to a WAV file named ``result.wav`` in the
    same directory, then decoded with the globally loaded Chinese
    wav2vec2 model (`process_ch2en` / `model_ch2en`).

    Args:
        webm_voi (str): path to the recorded WebM file.

    Returns:
        The sentence list produced by ``batch_decode``.
    """
    audio_path = os.path.join(os.path.dirname(webm_voi), "result.wav")
    convert_webm_to_wav(webm_voi, audio_path)

    # Reading WAV files needs the soundfile backend: pip install pysoundfile
    waveform, sample_rate = torchaudio.load(audio_path)
    loger.debug(f"audio file {audio_path} waveform is {waveform}")
    loger.debug(f"audio file {audio_path} sample rate is {sample_rate}")

    # The model was trained on 16 kHz audio; resample anything else
    # (e.g. a 44.1 kHz recording) down to that rate first.
    target_rate = 16000
    if sample_rate != target_rate:
        resample = torchaudio.transforms.Resample(
            orig_freq=sample_rate, new_freq=target_rate
        )
        waveform = resample(waveform)
        sample_rate = target_rate

    # Turn the raw audio into padded model-input tensors.
    features = process_ch2en(
        waveform, sampling_rate=sample_rate, return_tensors="pt", padding=True
    )
    input_values = features.input_values

    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        logits = model_ch2en(input_values.squeeze(0)).logits

    # Greedy CTC decoding: pick the most likely token at every frame.
    predicted_ids = torch.argmax(logits, dim=-1)
    predicted_sentences = process_ch2en.batch_decode(predicted_ids)

    loger.info(predicted_sentences)

    return predicted_sentences

def convert_en_voi2text(webm_voi):
    """Transcribe an English WebM voice recording to text.

    The recording is converted to a WAV file named ``result.wav`` in the
    same directory, then decoded with the globally loaded English
    wav2vec2 model (`process_en2ch` / `model_en2ch`).

    Args:
        webm_voi (str): path to the recorded WebM file.

    Returns:
        The sentence list produced by ``batch_decode``.
    """
    audio_path = os.path.join(os.path.dirname(webm_voi), "result.wav")
    convert_webm_to_wav(webm_voi, audio_path)

    # Reading WAV files needs the soundfile backend: pip install pysoundfile
    waveform, sample_rate = torchaudio.load(audio_path)
    loger.debug(f"audio file {audio_path} waveform is {waveform}")
    loger.debug(f"audio file {audio_path} sample rate is {sample_rate}")

    # The model was trained on 16 kHz audio; resample anything else
    # (e.g. a 44.1 kHz recording) down to that rate first.
    target_rate = 16000
    if sample_rate != target_rate:
        resample = torchaudio.transforms.Resample(
            orig_freq=sample_rate, new_freq=target_rate
        )
        waveform = resample(waveform)
        sample_rate = target_rate

    # Turn the raw audio into padded model-input tensors.
    features = process_en2ch(
        waveform, sampling_rate=sample_rate, return_tensors="pt", padding=True
    )
    input_values = features.input_values

    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        logits = model_en2ch(input_values.squeeze(0)).logits

    # Greedy CTC decoding: pick the most likely token at every frame.
    predicted_ids = torch.argmax(logits, dim=-1)
    predicted_sentences = process_en2ch.batch_decode(predicted_ids)

    loger.info(predicted_sentences)

    return predicted_sentences