import os
import numpy as np
import logging
from funasr import AutoModel
from pydub import AudioSegment
from chinese_itn import chinese_to_num
from config import ENABLE_EMOTION, LOCAL_MODEL_DIR

logger = logging.getLogger('spk_emotion')

# ========== Global models (lazy initialization) ==========
_asr_model = None
_emo_model = None

def init_models():
    """Initialize the ASR and (optionally) emotion-recognition models.

    Lazy and idempotent: models already loaded are not reloaded. Each model
    is loaded from the local cache directory when a matching subdirectory
    exists, otherwise the online model name is used so FunASR downloads it.
    The emotion model is only loaded when ENABLE_EMOTION is on.
    """
    global _asr_model, _emo_model

    # Point the ModelScope/FunASR caches at the local directory so any
    # download happens at most once and is reused on later runs.
    if LOCAL_MODEL_DIR:
        os.environ.setdefault("MODELSCOPE_CACHE", LOCAL_MODEL_DIR)
        os.environ.setdefault("FUNASR_CACHE", LOCAL_MODEL_DIR)

    def _resolve(name):
        # Prefer a local model directory named after the model; fall back
        # to the online model name when no local copy exists.
        if LOCAL_MODEL_DIR:
            local_path = os.path.join(LOCAL_MODEL_DIR, name)
            if os.path.isdir(local_path):
                return local_path
        return name

    if _asr_model is None:
        logger.info("正在加载ASR模型...")
        _asr_model = AutoModel(
            model=_resolve("paraformer-zh"),
            vad_model=_resolve("fsmn-vad"),
            punc_model=_resolve("ct-punc"),
            spk_model=_resolve("cam++"),
            disable_update=True
        )
        logger.info("ASR模型加载完成")

    # Load the emotion model only when the feature flag is enabled.
    if ENABLE_EMOTION and _emo_model is None:
        logger.info("正在加载情绪识别模型...")
        _emo_model = AutoModel(
            model=_resolve("emotion2vec_plus_large"),
            model_type="emotion",
            # device="cuda:0",
            device="cpu",
            disable_update=True
        )
        logger.info("情绪识别模型加载完成")

# Hotword list biasing ASR toward telecom customer-service vocabulary.
_ASR_HOTWORDS = '携转 套餐 副卡 主卡 流量包 通话时长 月费 超出套餐 国内通话 国际漫游 赠送流量 免费通话 阶梯资费 缴费 充值 自动扣费 电费账单 短信通知 在线支付 银行扣款 手机号 尾号 选号 虚拟号 卡激活 异地开户 补卡 SIM卡 实名认证 5G 4G 流量 信号 网络覆盖 宽带 上网 Wi-Fi 通讯服务 境外电话 办理 开户 关闭 升级 迁移 转套餐 咨询 投诉 携转 影响'

def _classify_emotion(samples):
    """Classify the emotion of one mono 16 kHz float32 audio clip.

    Returns (label, score). Yields ("unknown", 0.0) when the model produces
    no labels/scores, and ("error", 0.0) when inference raises (logged as a
    warning so one bad clip does not abort the whole file).
    """
    try:
        emo_res = _emo_model.generate(
            input=samples,
            granularity="utterance",
            extract_embedding=False
        )
        if isinstance(emo_res, list) and len(emo_res) > 0:
            result = emo_res[0]
            labels = result.get("labels", [])
            scores = result.get("scores", [])
            if labels and scores:
                max_idx = int(np.argmax(scores))
                return labels[max_idx], scores[max_idx]
        return "unknown", 0.0
    except Exception as e:
        logger.warning(f"情绪识别失败: {e}")
        return "error", 0.0

def process_audio_speech_emotion(audio_path, output_txt_path):
    """Run ASR (and optional per-sentence emotion analysis) on an audio file.

    Each recognized sentence is written to the output file as one line:
    speaker id, an optional right-aligned emotion column (only when
    ENABLE_EMOTION is on), and the recognized text with Chinese numerals
    converted to digits.

    Args:
        audio_path: Path of the input audio file.
        output_txt_path: Path of the output text file (parent directories
            are created as needed).

    Returns:
        bool: True on success, False on any failure (logged, not raised).
    """
    try:
        logger.info(f"开始处理音频文件: {audio_path}")

        # Ensure models are loaded (lazy, idempotent).
        init_models()

        if not os.path.exists(audio_path):
            logger.error(f"音频文件不存在: {audio_path}")
            return False

        audio = AudioSegment.from_file(audio_path)
        logger.info(f"音频加载成功，时长: {len(audio)}ms")

        logger.info("开始语音识别...")
        res = _asr_model.generate(
            input=audio_path,
            batch_size_s=300,
            use_itn=True,
            hotword=_ASR_HOTWORDS
        )
        logger.info("语音识别完成")

        # Create the output directory only when the path has one.
        output_dir = os.path.dirname(output_txt_path)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        if ENABLE_EMOTION:
            logger.info("开始情绪分析...")
        else:
            logger.info("情绪识别已关闭，仅输出识别文本")
        total_sentences = 0

        with open(output_txt_path, "w", encoding="utf-8") as f:
            for item in res:
                for sent in item.get("sentence_info", []):
                    spk = sent.get("spk", "unknown")
                    text = chinese_to_num(sent.get("text", ""))  # digits from Chinese numerals
                    start_ms = sent.get("start", 0)
                    end_ms = sent.get("end", 0)

                    if ENABLE_EMOTION:
                        # Slice and resample only when emotion analysis is
                        # actually run — skipping this is a free win when
                        # the feature is disabled.
                        clip = audio[start_ms:end_ms].set_channels(1).set_frame_rate(16000)
                        # /32768.0 assumes 16-bit PCM samples — TODO confirm
                        # clip.sample_width == 2 for all input formats.
                        samples = np.array(clip.get_array_of_samples()).astype("float32") / 32768.0
                        emotion, score = _classify_emotion(samples)
                        emo_text = "Emotion:{} (score={:.2f})".format(emotion, score)
                        emotion_part = f"{emo_text:>40} "
                    else:
                        emotion_part = ""

                    f.write(f"Speaker{spk:<2} {emotion_part}{text}\n")
                    total_sentences += 1

        logger.info(f"处理完成，共识别 {total_sentences} 句话，结果保存到: {output_txt_path}")
        return True

    except Exception as e:
        logger.error(f"处理音频时发生错误: {str(e)}", exc_info=True)
        return False

# Backwards-compatible direct-run entry point.
if __name__ == "__main__":
    source_audio = "aa.mp3"
    result_name = os.path.splitext(source_audio)[0] + ".txt"
    output_root = "/mnt/sdc/projects/env/audio-upload/ai-asr-v2"
    result_path = os.path.join(output_root, result_name)

    ok = process_audio_speech_emotion(source_audio, result_path)
    if ok:
        print(f">>> 结果已输出到 {result_name}")
    else:
        print(">>> 处理失败")


