# Add-on metadata read by Blender's extension system.
# Fixes: added the required "version" key (Blender logs a warning without it)
# and normalized the mixed tab/space indentation of the original dict.
bl_info = {
    "name": "Audio Viseme Mapping Panel",
    "version": (1, 0, 0),
    "blender": (4, 3, 0),
    "category": "Object",
    "author": "jifanglab",
}

import bpy
import os  # 导入操作系统接口模块
import wave  # 导入处理 WAV 文件的模块
import json  # 导入 JSON 数据处理模块
from pydub import AudioSegment  # 导入 Pydub 音频处理模块
from pypinyin import pinyin, Style  # 导入 pypinyin 拼音转换模块

# Path to the Vosk Chinese speech-recognition model directory.
# NOTE(review): hard-coded absolute Windows path — must be edited per install.
model_path = "E:\\blender\\plugins\\zijixiede\\audiotolib\\vosk-model-cn-0.22"  # adjust to the actual model path
model = None  # Vosk model is loaded lazily on first use (see load_model)

# Viseme mapping: Mandarin initials (shengmu) -> viseme index (0-16).
initials_to_viseme = {
    'b': 1, 'p': 1, 'm': 1, 'f': 5,
    'd': 2, 't': 2, 'n': 2, 'l': 2,
    'g': 3, 'k': 3, 'h': 3,
    'j': 4, 'q': 4, 'x': 4,
    'zh': 4, 'ch': 4, 'sh': 4, 'r': 4,
    'z': 4, 'c': 4, 's': 4,
    'y': 9, 'w': 10,
    '': 1  # empty string = syllable has no initial; map to silence viseme
}

# Viseme mapping: Mandarin finals (yunmu) -> viseme index (0-16).
finals_to_viseme = {
    'a': 6, 'ai': 6, 'an': 6, 'ang': 6, 'ao': 6,
    'e': 8, 'ei': 8, 'en': 8, 'eng': 8, 'er': 8,
    'i': 9, 'ia': 9, 'ian': 9, 'iang': 9, 'iao': 9, 'ie': 9, 'in': 9, 'ing': 9, 'iong': 9, 'iu': 9,
    'o': 7, 'ong': 7, 'ou': 7,
    'u': 10, 'ua': 10, 'uai': 10, 'uan': 10, 'uang': 10, 'ue': 10, 'ui': 10, 'un': 10, 'uo': 10,
    'ü': 10, 'üe': 10,
    '': 1  # empty string = syllable has no final; map to silence viseme
}


def load_model():
    """Load the Vosk model from ``model_path`` on first call; later calls are no-ops."""
    global model
    if model is not None:
        return  # already loaded
    from vosk import Model  # deferred import so Blender can load the add-on without vosk
    model = Model(model_path)

def convert_audio_to_wav(audio_path, output_path):
    """Re-encode *audio_path* as 16 kHz mono 16-bit PCM WAV at *output_path*.

    Returns the converted pydub ``AudioSegment``.
    """
    segment = (
        AudioSegment.from_file(audio_path)
        .set_channels(1)         # mono
        .set_frame_rate(16000)   # 16 kHz sample rate
        .set_sample_width(2)     # 16-bit samples
    )
    segment.export(output_path, format="wav")
    return segment

def audio_to_text(audio_path):
    """Run Vosk speech recognition over a WAV file and return word timings.

    Parameters:
        audio_path: path to a mono, 16-bit, uncompressed PCM WAV file.

    Returns:
        A list of Vosk word dicts, each containing at least the keys
        'word', 'start' and 'end' (times in seconds).

    Raises:
        ValueError: if the file is not mono 16-bit uncompressed PCM.
    """
    load_model()
    from vosk import KaldiRecognizer  # deferred import (see load_model)

    # Fix: the original leaked the wave file handle (never closed, not even
    # on the ValueError path). Wave_read supports the context-manager
    # protocol, so `with` guarantees the file is closed.
    with wave.open(audio_path, "rb") as wf:
        if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
            raise ValueError("音频文件必须是WAV格式单声道PCM，位宽为16位")

        recognizer = KaldiRecognizer(model, wf.getframerate())
        recognizer.SetWords(True)  # request per-word timestamps

        words_with_times = []
        while True:
            data = wf.readframes(4000)
            if len(data) == 0:
                break
            if recognizer.AcceptWaveform(data):
                result = json.loads(recognizer.Result())
                if 'result' in result:
                    words_with_times.extend(result['result'])

        # Flush the recognizer for any trailing partial utterance.
        final_result = json.loads(recognizer.FinalResult())
        if 'result' in final_result:
            words_with_times.extend(final_result['result'])

    return words_with_times

def text_to_phonemes(text):
    """Split Chinese *text* into (initial, final) pinyin pairs using pypinyin."""
    shengmu = pinyin(text, style=Style.INITIALS, strict=False)  # initials
    yunmu = pinyin(text, style=Style.FINALS, strict=False)      # finals
    phonemes = [(s[0], y[0]) for s, y in zip(shengmu, yunmu)]
    print(f"Phonemes: {phonemes}")
    return phonemes

def phonemes_to_visemes(phonemes):
    """Translate (initial, final) pairs into a flat list of viseme indices.

    Each pair contributes two entries: the initial's viseme followed by
    the final's viseme. Unknown phonemes fall back to viseme 1 (silence).
    """
    visemes = []
    for shengmu, yunmu in phonemes:
        visemes.extend((
            initials_to_viseme.get(shengmu, 1),
            finals_to_viseme.get(yunmu, 1),
        ))
    print(f"Visemes: {visemes}")
    return visemes

def audio_to_visemes(audio_path, fps=24, start_frame=1):
    """Convert an audio file into a sorted list of (frame, viseme) pairs.

    Parameters:
        audio_path: any audio file readable by pydub.
        fps: scene frame rate used to map seconds to frames.
        start_frame: frame at which audio playback begins.

    Returns:
        A list of (frame, viseme_index) tuples sorted by frame number.
        Gaps between recognized words are filled with viseme 1 (silence).
    """
    wav_path = "converted_audio.wav"  # written to the CWD, overwritten per run
    convert_audio_to_wav(audio_path, wav_path)  # fix: dropped unused `audio` binding
    words_with_times = audio_to_text(wav_path)

    frame_visemes = []
    prev_end_time = 0  # end time (seconds) of the previous recognized word
    for word_data in words_with_times:
        word = word_data['word']
        start_time = word_data['start']
        end_time = word_data['end']

        # Fill the silent gap before this word with the silence viseme.
        if start_time > prev_end_time:
            silent_start_frame = start_frame + int(prev_end_time * fps)
            silent_end_frame = start_frame + int(start_time * fps)
            for frame in range(silent_start_frame, silent_end_frame):
                frame_visemes.append((frame, 1))

        prev_end_time = end_time

        # Frame span covered by this word.
        start_frame_word = start_frame + int(start_time * fps)
        end_frame_word = start_frame + int(end_time * fps)

        visemes = phonemes_to_visemes(text_to_phonemes(word))
        if not visemes:
            # Fix: a word yielding no phonemes previously caused a
            # ZeroDivisionError in the frames-per-viseme computation.
            continue

        # Spread the word's visemes evenly across its frame span.
        frames_per_viseme = (end_frame_word - start_frame_word) / len(visemes)
        for i, viseme in enumerate(visemes):
            frame = start_frame_word + int(i * frames_per_viseme)
            frame_visemes.append((frame, viseme))

    return sorted(frame_visemes, key=lambda x: x[0])

class AUDIOVISMEMAPPING_OT_ImportAudio(bpy.types.Operator):
    """Pick an audio file and bake viseme keyframes onto the active object."""

    bl_idname = "audiovismemapping.import_audio"  # unique operator id
    bl_label = "导入音频"
    bl_description = "导入音频文件并生成嘴型关键帧"

    filepath: bpy.props.StringProperty(subtype="FILE_PATH")  # set by the file browser
    fps: bpy.props.IntProperty(name="FPS", default=24)  # frame rate for time->frame mapping

    def execute(self, context):
        scene = context.scene
        props = scene.audiovismemapping
        viseme_property_name = props.viseme_property_name  # custom-property name to animate
        start_frame = props.start_frame

        # Fix: validate the active object BEFORE running the slow speech
        # recognition pipeline (the original only checked afterwards).
        obj = context.object
        if not obj:
            self.report({'ERROR'}, "没有活动对象")
            return {'CANCELLED'}

        frame_visemes = audio_to_visemes(self.filepath, self.fps, start_frame)

        data_path = f'["{viseme_property_name}"]'
        for frame, viseme in frame_visemes:
            obj[viseme_property_name] = viseme
            obj.keyframe_insert(data_path=data_path, frame=frame)

        # Fix: set CONSTANT interpolation in one pass over the matching
        # F-curve's keyframes. The original rescanned every F-curve per
        # inserted keyframe and mutated only keyframe_points[-1], which is
        # both O(n*m) and fragile if Blender re-sorts the points.
        if obj.animation_data and obj.animation_data.action:
            for fcurve in obj.animation_data.action.fcurves:
                if fcurve.data_path == data_path:
                    for keyframe_point in fcurve.keyframe_points:
                        keyframe_point.interpolation = 'CONSTANT'

        return {'FINISHED'}

    def invoke(self, context, event):
        # Open the file browser; execute() runs after the user confirms.
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}

class AUDIOVISMEMAPPING_PT_Panel(bpy.types.Panel):
    """UI panel in Properties > Object exposing the add-on's settings."""

    bl_label = "Audio Viseme Mapping"
    bl_idname = "AUDIOVISMEMAPPING_PT_panel"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "object"

    def draw(self, context):
        props = context.scene.audiovismemapping
        col = self.layout
        col.prop(props, "viseme_property_name")  # custom-property name field
        col.prop(props, "start_frame")  # playback start-frame field
        col.operator("audiovismemapping.import_audio")  # launch the importer

class AUDIOVISMEMAPPING_Props(bpy.types.PropertyGroup):
    # Per-scene settings for the add-on (attached as Scene.audiovismemapping).
    viseme_property_name: bpy.props.StringProperty(
        name="嘴型属性名称",  # UI label: "viseme property name"
        description="嘴型关键帧的自定义属性名称",  # custom property that receives the viseme keyframes
        default="viseme"
    )
    start_frame: bpy.props.IntProperty(
        name="起始帧",  # UI label: "start frame"
        description="音频播放的起始帧",  # frame at which audio playback starts
        default=1
    )

def register():
    """Register the operator, panel and property group, then attach the
    settings to the Scene."""
    for cls in (
        AUDIOVISMEMAPPING_OT_ImportAudio,
        AUDIOVISMEMAPPING_PT_Panel,
        AUDIOVISMEMAPPING_Props,
    ):
        bpy.utils.register_class(cls)
    # Scene-level pointer so the settings persist with the .blend file.
    bpy.types.Scene.audiovismemapping = bpy.props.PointerProperty(type=AUDIOVISMEMAPPING_Props)

def unregister():
    """Unregister all add-on classes and remove the Scene pointer."""
    for cls in (
        AUDIOVISMEMAPPING_OT_ImportAudio,
        AUDIOVISMEMAPPING_PT_Panel,
        AUDIOVISMEMAPPING_Props,
    ):
        bpy.utils.unregister_class(cls)
    del bpy.types.Scene.audiovismemapping

if __name__ == "__main__":
    register()  # allows running the add-on directly from Blender's text editor