from pydub import AudioSegment
import whisper
import numpy as np

# Load the Whisper model from a local checkpoint.
model = whisper.load_model("/home/peter/Public/whisper_data/base.pt")

# Load the audio file to be transcribed.
audio = AudioSegment.from_file("/home/peter/Public/audio_data/englishrecord/1730604585033.mp3")

# Whisper's feature pipeline (pad_or_trim / log_mel_spectrogram) expects
# 16 kHz, mono, 16-bit PCM. Convert up front: this also guarantees that
# get_array_of_samples() yields a single non-interleaved channel and that
# the /32768.0 normalization below (16-bit full scale) is correct.
audio = audio.set_frame_rate(16000).set_channels(1).set_sample_width(2)

# Segment length in milliseconds (30 s — Whisper's native window size).
segment_length = 30 * 1000

# Decoding options are loop-invariant; build them once outside the loop.
options = whisper.DecodingOptions()

# Walk the audio in 30-second windows; pydub slicing past the end is safe,
# so the final (possibly shorter) window needs no special handling.
for i, start_time in enumerate(range(0, len(audio), segment_length)):
    segment = audio[start_time:start_time + segment_length]

    # Convert the pydub AudioSegment to a float32 numpy array in [-1, 1].
    segment_samples = np.array(segment.get_array_of_samples()).astype(np.float32) / 32768.0

    # Pad (or trim) to exactly 30 s of samples, as Whisper requires.
    segment_samples = whisper.pad_or_trim(segment_samples)

    # Compute the log-Mel spectrogram and move it to the model's device.
    mel = whisper.log_mel_spectrogram(segment_samples).to(model.device)

    # Detect the spoken language of this window.
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # Decode the audio window into text.
    result = whisper.decode(model, mel, options)

    # Print the recognized text for this segment.
    print(i,":",result.text)