#! python
# -*- coding: UTF-8 -*-
"""
@Project ：ai-demo 
@File    ：test.py
@IDE     ：PyCharm 
@Author  ：zhangyiheng
@Date    ：2025/4/1 17:22 
@Describe：TODO
"""
from funasr import AutoModel

# chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
# encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
# decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention
#
# Root directory holding locally downloaded ModelScope repositories.
MODEL_ROOT = "/Users/zhangzhan/Desktop/zhangzhan/modelscopes/repository-c"

# Primary ASR model: Paraformer-large, Chinese, 16 kHz, offline, long-audio
# variant with built-in VAD + punctuation (vocab 8404, PyTorch).
_ASR_MODEL_DIR = (
    f"{MODEL_ROOT}/iic/"
    "speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
)

# Build the offline speech-recognition pipeline.
model = AutoModel(
    model=_ASR_MODEL_DIR,
    model_revision="v2.0.5",  # pinned model revision

    # Update / download behavior: use only the local copy.
    disable_update=True,    # never auto-update the model
    force_download=False,   # do not re-download if already present
    resume_download=False,  # no resumable-download mode

    # Cache models under MODEL_ROOT instead of the default ~/.cache.
    cache_dir=MODEL_ROOT,

    # --- Optional components, currently disabled ---------------------------
    # Voice-activity detection (FSMN VAD, Chinese, 16 kHz):
    # vad_model=f"{MODEL_ROOT}/iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    # vad_model_revision="v2.0.4",
    # vad_kwargs={"max_single_segment_time": 60000},  # max segment, ms
    #
    # Punctuation restoration (CT-Transformer, Chinese):
    # punc_model=f"{MODEL_ROOT}/iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
    # punc_model_revision="v2.0.4",
    #
    # device="cuda:0",        # pin the compute device explicitly
    # spk_model="cam++",      # speaker diarization (extra download required)
    # spk_model_revision="v2.0.2",
    # quantize=True,          # quantized inference, if the model supports it
    #
    # Inference defaults (can also be overridden per generate() call):
    # batch_size_s=300,       # batch size, measured in seconds of audio
    # hotword="阿里云",        # hotword boosting for specific vocabulary
)
#
#
#
# import soundfile
# import os
#
# wav_file = os.path.join(model.model_path, "/Users/zhangzhan/Desktop/zhangzhan/modelscopes/vad_example.wav")
# speech, sample_rate = soundfile.read(wav_file)
# chunk_stride = chunk_size[1] * 960 # 600ms
#
# cache = {}
# total_chunk_num = int(len((speech)-1)/chunk_stride+1)
# for i in range(total_chunk_num):
#     speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
#     is_final = i == total_chunk_num - 1
#     res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back, decoder_chunk_look_back=decoder_chunk_look_back)
#     print(res)


from funasr import AutoModel
#
# chunk_size = 200 # ms
# #model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")
#
# import soundfile
#
# wav_file = f"/Users/zhangzhan/Desktop/zhangzhan/modelscopes/vad_example.wav"
# speech, sample_rate = soundfile.read(wav_file)
# chunk_stride = int(chunk_size * sample_rate / 1000)
#
# cache = {}
# total_chunk_num = int(len((speech)-1)/chunk_stride+1)
# for i in range(total_chunk_num):
#     speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
#     is_final = i == total_chunk_num - 1
#     res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size)
#     if len(res[0]["text"]):
#         print(res)

