# coding = utf-8
# @Time    : 2024-10-13  08:21:58
# @Author  : zhaosheng@nuaa.edu.cn
# @Describe: Preprocess the media file.

# pip install dspeech==0.0.1 dguard==0.1.21
# export DGUARD_MODEL_PATH=/models/dguard_models
# export DSPEECH_HOME=/models/dspeech_models
from dspeech import STT
from dguard import DguardModel as dm

from rich.console import Console
console = Console()

# Initialise the speech-to-text (ASR) model.
stt_model = STT()

# Initialise the dguard speaker-embedding / diarization model.
dm_model = dm(
    embedding_model_names=["eres2net_cn_common_200k", "campp_cn_common_200k"],
    device="cuda",
    length=10, # each segment is 10 seconds long
    channel=0, # use the first audio channel
    max_split_num=5, # split each audio into at most 5 segments
    start_time=0,    # start processing every audio file at 0 seconds
    mean=True,    # return the mean of all segment embeddings
    verbose=True, # verbose logging; defaults to DGUARD_MODEL_PATH/logs/%Y%m%d-%H%M%S.log
    apply_vad=True, # automatically apply VAD before speaker encoding
    vad_smooth_threshold=0.25, # VAD smoothing: merge speech segments whose gap is below this value (seconds)
    vad_min_duration=0.3, # minimum speech-segment duration after smoothing; shorter segments are dropped
    save_vad_path=None, # do not save VAD results automatically
    diar_num_spks=None,
    diar_min_num_spks=1,
    diar_max_num_spks=4,
    diar_min_duration=0.3,
    diar_window_secs=1.5,
    diar_period_secs=0.75,
    diar_frame_shift=10,
    diar_batch_size=4, # batch size for sub-segment speaker encoding during clustering
    diar_subseg_cmn=True
)

def rich_print(content):
    """Pretty-print a diarized transcript, colour-coded per speaker.

    Each non-empty line is expected to look like ``Speaker <id>: <text>``;
    the numeric speaker id selects one of the available colours (cycling
    when there are more speakers than colours). Lines without a colon
    (e.g. the leading blank line produced by the transcript builder) are
    skipped; lines whose speaker token is not numeric are printed
    uncoloured instead of crashing.

    Args:
        content: Multi-line transcript string as produced by
            ``get_diarization_content``.
    """
    colors = ["red", "green", "blue", "yellow", "magenta", "cyan"]
    for line in content.split("\n"):
        if ":" not in line:
            continue
        # "Speaker 3: hello" -> prefix "Speaker 3" -> last token "3"
        spk_id = line.split(":")[0].split(" ")[-1]
        try:
            # Cycle through the palette by speaker id (was a hardcoded % 6).
            color = colors[int(spk_id) % len(colors)]
        except ValueError:
            # Colon present but no numeric speaker id; print without markup.
            console.print(line)
            continue
        console.print(f"[{color}]{line}")

def get_diarization_content(file_path, emotion_time_threshold=2):
    """Diarize, transcribe and (optionally) emotion-tag an audio file.

    Args:
        file_path: Path to the audio file to process.
        emotion_time_threshold: Minimum segment duration in seconds for
            running emotion classification. Set it very high (e.g. 1000)
            to disable emotion tagging entirely.

    Returns:
        A multi-line string where each line starts a new speaker turn,
        e.g. ``Speaker 0: hello there``; consecutive segments by the
        same speaker are appended to the current line.
    """
    # Step 1: Speaker Diarization. The result is a list of tuples like
    # ('dguard', start_sec, end_sec, speaker_label), e.g.
    # [('dguard', 0.322, 3.352, 0), ('dguard', 3.49, 6.36, 2), ...].
    # Run it exactly once: the original code diarized the file twice
    # (once into an unused variable, once in the loop), doubling the
    # most expensive step.
    segments = dm_model.diarize(file_path)

    # Step 2: per-segment speech-to-text, accumulated via list + join
    # instead of quadratic string concatenation.
    parts = []
    last_spk = ""
    for data in segments:
        spk_label = data[3]
        start_time = data[1]
        end_time = data[2]
        generate_text = stt_model.transcribe_file(file_path, start=start_time, end=end_time)
        # generate_text is the transcribed text (not punctuated).
        if end_time - start_time > emotion_time_threshold:
            # Only classify emotion for segments longer than the
            # threshold; very short clips give unreliable scores.
            emotion = stt_model.emo_classify_file(file_path, start=start_time, end=end_time)
            # emotion is a dict with parallel 'labels' and 'scores'
            # lists, e.g.
            # {'key': 'rand_key_2j1IfWqrAz2Kr',
            #  'labels': ['生气/angry', ..., '<unk>'],
            #  'scores': [3.02, 0.20, ...]}
            # Pick the label with the highest score (single max scan).
            emotion_score = max(emotion["scores"])
            emotion_label = emotion["labels"][emotion["scores"].index(emotion_score)]
            emotion_text = f"(emotion：{emotion_label} with score: {emotion_score:.2f})"
        else:
            emotion_text = ""
        if spk_label != last_spk:
            # New speaker turn: start a new line.
            parts.append(f"\nSpeaker {spk_label}: {generate_text} " + emotion_text)
            last_spk = spk_label
        else:
            # Same speaker: continue the current line.
            parts.append(f" {generate_text}")
    return "".join(parts)

if __name__ == "__main__":
    # Demo: diarize and transcribe a single-channel, two-speaker recording,
    # then render the transcript colour-coded by speaker.
    audio_path = "/home/zhaosheng/Documents/dguard_project/test/data/1channel2person.wav"
    transcript = get_diarization_content(audio_path)
    rich_print(transcript)