from faster_whisper import WhisperModel
import json
import time
import os
import soundfile as sf
from datetime import datetime
import glob

def transcribe_audio(audio_path, model_size="large-v3", save_json=False, output_file="transcription_result.json"):
    """
    Transcribe an audio file with a Faster Whisper model and return the result.

    Args:
        audio_path (str): Path to the audio file.
        model_size (str): Whisper model size, default "large-v3".
        save_json (bool): Whether to also write the JSON result to a file.
        output_file (str): Target filename used when save_json is True.

    Returns:
        dict: Transcription result (detected language, timings, segments).
    """
    total_start_time = time.time()

    # Load the model (loaded per call; for batches reuse a single model
    # instance as transcribe_directory() does).
    model = WhisperModel(model_size)

    model_start_time = time.time()
    segments, info = model.transcribe(audio_path)
    # transcribe() returns a lazy generator: the actual decoding happens while
    # iterating it, so materialize the segments BEFORE stopping the timer —
    # otherwise the measured model time is near zero.
    segments_list = list(segments)
    model_processing_time = time.time() - model_start_time

    result = {
        "detected_language": info.language,
        "language_probability": info.language_probability,
        "model_processing_time_seconds": round(model_processing_time, 2),
        # Total time is computed once, before serialization, so the printed,
        # saved and returned results all agree (previously the returned dict
        # was updated after the JSON had already been written).
        "total_processing_time_seconds": round(time.time() - total_start_time, 2),
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "segments": [
            {"start": segment.start, "end": segment.end, "text": segment.text}
            for segment in segments_list
        ],
    }

    json_result = json.dumps(result, ensure_ascii=False, indent=2)
    print(json_result)

    if save_json:
        with open(output_file, "w", encoding="utf-8") as f:
            f.write(json_result)

    return result

def get_audio_info(audio_path):
    """
    Collect basic metadata about an audio file.

    Args:
        audio_path (str): Path to the audio file.

    Returns:
        dict: Duration, sample rate, channel count and format details, or a
        dict containing only an "error" key if the file could not be read.
    """
    try:
        info = sf.info(audio_path)
    except Exception as e:
        # Best-effort: report the failure and return an error marker instead
        # of raising, so callers can keep processing other files.
        print(f"获取音频信息时出错: {str(e)}")
        return {"error": str(e)}

    return {
        "duration": round(info.duration, 2),
        "samplerate": info.samplerate,
        "channels": info.channels,
        "format": info.format,
        "subtype": info.subtype,
    }

def transcribe_audio_complete(audio_path, model_size="large-v3", save_json=False, output_file=None):
    """
    Transcribe an audio file with Faster Whisper and return a complete result
    that also includes metadata about the audio file itself.

    Args:
        audio_path (str): Path to the audio file.
        model_size (str): Whisper model size, default "large-v3".
        save_json (bool): Whether to save the JSON result (and a plain-text
            transcript) next to the audio file.
        output_file (str): JSON filename; when None it is derived from
            audio_path as "<stem>_transcript.json".

    Returns:
        dict: Full transcription result (audio info, timings, model id,
        language, full text, segments).
    """
    total_start_time = time.time()

    # Derive a default output name from the audio file when none is given.
    if output_file is None and save_json:
        output_file = f"{os.path.splitext(audio_path)[0]}_transcript.json"

    audio_info = get_audio_info(audio_path)

    model = WhisperModel(model_size)

    model_start_time = time.time()
    segments, info = model.transcribe(audio_path)
    # transcribe() returns a lazy generator: the actual decoding happens while
    # iterating it, so materialize the segments BEFORE stopping the timer —
    # otherwise the measured model time is near zero.
    segments_list = list(segments)
    model_processing_time = time.time() - model_start_time

    all_text = " ".join(segment.text for segment in segments_list)

    result = {
        "audio_file": audio_path,
        "audio_duration_seconds": audio_info.get("duration", 0),
        "audio_info": audio_info,
        "model_processing_time_seconds": round(model_processing_time, 2),
        # Total time is computed once, before serialization, so the printed,
        # saved and returned results all agree (previously the returned dict
        # was updated after the JSON had already been written).
        "total_processing_time_seconds": round(time.time() - total_start_time, 2),
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "model_id": f"faster-whisper-{model_size}",
        "detected_language": info.language,
        "language_probability": info.language_probability,
        "text": all_text,
        "segments": [
            {"start": segment.start, "end": segment.end, "text": segment.text}
            for segment in segments_list
        ],
    }

    json_result = json.dumps(result, ensure_ascii=False, indent=2)
    print(json_result)

    if save_json:
        with open(output_file, "w", encoding="utf-8") as f:
            f.write(json_result)
        print(f"JSON结果已保存至: {output_file}")

        # Also save a plain-text transcript next to the audio file.
        text_file = f"{os.path.splitext(audio_path)[0]}_transcript.txt"
        with open(text_file, "w", encoding="utf-8") as f:
            f.write(all_text)
        print(f"文本结果已保存至: {text_file}")

    return result

def transcribe_directory(directory=".", extensions=("wav", "mp3", "flac", "m4a"),
                         model_size="large-v3", save_json=False):
    """
    Transcribe every supported audio file found in a directory.

    Args:
        directory (str): Directory to scan, default the current directory.
        extensions (sequence of str): Audio file extensions (without dot) to
            look for. Tuple default avoids the shared-mutable-default pitfall.
        model_size (str): Whisper model size.
        save_json (bool): Whether to save JSON and text results per file.

    Returns:
        dict: Mapping of audio file path -> transcription result, or an error
        dict for files that failed.
    """
    results = {}

    # Collect all matching audio files.
    audio_files = []
    for ext in extensions:
        audio_files.extend(glob.glob(os.path.join(directory, f"*.{ext}")))

    if not audio_files:
        print(f"在目录 {directory} 中未找到支持的音频文件")
        return results

    print(f"找到 {len(audio_files)} 个音频文件")

    # Load the model once and reuse it for every file.
    model = WhisperModel(model_size)

    for audio_path in audio_files:
        print(f"正在处理: {audio_path}")

        total_start_time = time.time()
        audio_info = get_audio_info(audio_path)
        model_start_time = time.time()

        try:
            segments, info = model.transcribe(audio_path)
            # transcribe() returns a lazy generator: decoding happens while
            # iterating it, so materialize the segments BEFORE stopping the
            # timer — otherwise the measured model time is near zero.
            segments_list = list(segments)
            model_processing_time = time.time() - model_start_time

            all_text = " ".join(segment.text for segment in segments_list)

            result = {
                "audio_file": audio_path,
                "audio_duration_seconds": audio_info.get("duration", 0),
                "audio_info": audio_info,
                "model_processing_time_seconds": round(model_processing_time, 2),
                # Total time is computed once, before serialization, so the
                # saved file and the returned dict agree (previously the dict
                # was updated only after the JSON had been written).
                "total_processing_time_seconds": round(time.time() - total_start_time, 2),
                "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "model_id": f"faster-whisper-{model_size}",
                "detected_language": info.language,
                "language_probability": info.language_probability,
                "text": all_text,
                "segments": [
                    {"start": segment.start, "end": segment.end, "text": segment.text}
                    for segment in segments_list
                ],
            }

            if save_json:
                output_file = f"{os.path.splitext(audio_path)[0]}_transcript.json"
                with open(output_file, "w", encoding="utf-8") as f:
                    f.write(json.dumps(result, ensure_ascii=False, indent=2))
                print(f"JSON结果已保存至: {output_file}")

                # Also save a plain-text transcript next to the audio file.
                text_file = f"{os.path.splitext(audio_path)[0]}_transcript.txt"
                with open(text_file, "w", encoding="utf-8") as f:
                    f.write(all_text)
                print(f"文本结果已保存至: {text_file}")

            # Store the result once (the original assigned it twice).
            results[audio_path] = result

        except Exception as e:
            # Per-file failures are recorded but do not stop the batch.
            error_msg = f"处理音频 {audio_path} 时出错: {str(e)}"
            print(error_msg)
            results[audio_path] = {"error": error_msg, "audio_file": audio_path}

    return results

if __name__ == "__main__":
    # Example usage.
    # audio_path = "参会者，您好.wav"
    audio_path = "unit33.mp3"  # russia

    # Run the full pipeline (audio info + transcription).
    result = transcribe_audio_complete(audio_path)

    # To process an entire directory instead:
    # results = transcribe_directory(save_json=True)
