import os
import shutil
import sys
import tempfile
import uuid
from datetime import datetime
from typing import Dict, List, Optional

from fastapi import APIRouter, HTTPException, Request, Response, UploadFile
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess

from configs import config
from utils.FileUtil import fetch_file
from utils.MD5Util import compute_path_md5


# API router for all speech-recognition endpoints (mounted under /audio).
router = APIRouter(prefix='/audio', tags=['语音识别'])


# Load the speech-recognition pipeline once at module import:
# paraformer-zh (ASR) + fsmn-vad (voice activity detection)
# + ct-punc-c (punctuation restoration) + cam++ (speaker diarization,
# which produces the spk/spk_id fields consumed below).
model = AutoModel(
    # Alternative configuration (local model path / SenseVoice-style VAD)
    # kept for reference — NOTE(review): confirm before re-enabling.
    # model=config.service.funasr.model_path,
    # attn_implementation="eager",
    # low_cpu_mem_usage=True,
    # optimize_model=True,
    # vad_model="fsmn-vad",
    # vad_kwargs={"max_single_segment_time": 30000},
    # device="cpu",

    model="paraformer-zh",
    model_revision="v2.0.4",
    vad_model="fsmn-vad",
    vad_model_revision="v2.0.4",
    punc_model="ct-punc-c",
    punc_model_revision="v2.0.4",
    spk_model="cam++",
)


def normalize_segments(
    segments: List[Dict],
    merge_sentences: bool = True,
    merge_short_sentences: bool = True,
) -> List[Dict]:
    """Normalize ASR segmentation results.

    Merges adjacent segments of the same speaker based on trailing
    punctuation and on sentence length, then assigns a 1-based ``index``
    to every resulting segment.

    Args:
        segments: Raw segment list; each item must contain ``spk_id`` and
            ``sentence`` keys (``start_time``/``end_time`` are optional).
        merge_sentences: Merge a segment ending with a "continuation"
            punctuation mark (comma/semicolon/colon, full- or half-width)
            into the next same-speaker segment, with no separating space.
        merge_short_sentences: Merge a segment shorter than 4 characters
            into the next same-speaker segment, separated by a space.

    Returns:
        The normalized segment list. Input items are shallow-copied; the
        input list itself is never mutated.

    Raises:
        ValueError: If any segment lacks ``sentence`` or ``spk_id``.
    """
    if not segments:
        return []

    # Validate input up front so failures are not silent mid-merge.
    for segment in segments:
        if "sentence" not in segment or "spk_id" not in segment:
            raise ValueError("每个分段必须包含 'sentence' 和 'spk_id' 字段")

    # Punctuation that signals the sentence continues (full- and half-width).
    merge_punctuations = ("，", "；", "：", "、", ",", ";", ":")

    def ends_with_punctuation(text: str) -> bool:
        # str.endswith accepts a tuple: one C-level call instead of any().
        return text.endswith(merge_punctuations)

    def can_merge_with_next(current: Dict, next_item: Dict) -> bool:
        # BUGFIX: previously returned the stripped sentence (a str) when the
        # speaker check passed, contradicting the -> bool annotation.
        return bool(
            current.get("spk_id") == next_item.get("spk_id")
            and next_item.get("sentence", "").strip()
        )

    merged_results: List[Dict] = []
    i = 0
    while i < len(segments):
        current = segments[i].copy()
        merged = False

        # Punctuation-based merge takes priority over the short-sentence rule.
        if (
            merge_sentences
            and ends_with_punctuation(current.get("sentence", ""))
            and i + 1 < len(segments)
        ):
            next_item = segments[i + 1]
            if can_merge_with_next(current, next_item):
                # Direct concatenation: the punctuation already separates them.
                current["sentence"] += next_item["sentence"]
                current["end_time"] = next_item.get("end_time", current.get("end_time"))
                merged_results.append(current)
                i += 2
                merged = True

        # Short-sentence merge: fold fragments (< 4 chars) into the next one.
        if (
            not merged
            and merge_short_sentences
            and i + 1 < len(segments)
        ):
            if len(current.get("sentence", "").strip()) < 4:
                next_item = segments[i + 1]
                if can_merge_with_next(current, next_item):
                    # Space-separated: no punctuation joins the two parts.
                    current["sentence"] += " " + next_item["sentence"]
                    current["end_time"] = next_item.get(
                        "end_time", current.get("end_time")
                    )
                    merged_results.append(current)
                    i += 2
                    merged = True

        if not merged:
            merged_results.append(current)
            i += 1

    # 1-based display index for downstream consumers.
    for idx, item in enumerate(merged_results, start=1):
        item["index"] = idx

    return merged_results


def extract_text(segments: List[Dict]) -> str:
    """Concatenate the sentences of all segments into one text string.

    Empty sentences are dropped; sentences are stripped and joined with
    single spaces, and any whitespace runs left inside individual
    sentences are collapsed so the result reads coherently.
    """
    if not segments:
        return ""

    import re

    # Keep only non-empty, stripped sentences, in order.
    stripped = (seg.get("sentence", "").strip() for seg in segments)
    joined = " ".join(part for part in stripped if part)

    # Collapse any remaining whitespace runs into single spaces.
    return re.sub(r'\s+', ' ', joined).strip()


def extract_segments_from_result(
    res: List[Dict],
    merge_sentences: bool = True,
    merge_short_sentences: bool = True,
) -> List[Dict]:
    """Extract and normalize segment info from a funASR inference result.

    Best-effort contract: on any malformed input this reports the problem
    and returns [] rather than raising, so callers always get a list.

    Args:
        res: funASR inference result (list with one dict per audio input).
        merge_sentences: Forwarded to normalize_segments.
        merge_short_sentences: Forwarded to normalize_segments.

    Returns:
        Normalized segment list, or [] when res is empty/malformed.
    """
    if not res:
        return []

    def _to_float(value, fallback: float) -> float:
        # Best-effort numeric conversion for start/end timestamps.
        try:
            return float(value)
        except (ValueError, TypeError):
            return fallback

    try:
        item = res[0]
        sentence_info = item.get("sentence_info")
        if sentence_info is None:
            # FIX: previously this raised a ValueError only for it to be
            # swallowed by this function's own except below; report directly.
            print("提取分段信息失败: ASR 结果格式错误，缺少 sentence_info 字段")
            return []

        # Result-level default speaker ID, used when a sentence carries none.
        spk_default: Optional[str] = None
        if item.get("spk_id") is not None:
            spk_default = str(item["spk_id"])
        elif item.get("spk") is not None:
            spk_default = str(item["spk"])

        results: List[Dict] = []
        for s in sentence_info:
            sent_text = (s.get("text") or "").strip()
            if not sent_text:  # skip empty sentences
                continue

            st = _to_float(s.get("start", 0.0), 0.0)
            ed = _to_float(s.get("end", st), st)

            # Per-sentence speaker overrides the result-level default.
            spk_local = s.get("spk_id", s.get("spk", None))
            spk_val = str(spk_local) if spk_local is not None else spk_default

            results.append({
                "spk_id": spk_val,
                "sentence": sent_text,
                # st/ed are already floats; no redundant re-cast needed.
                "start_time": st,
                "end_time": ed,
            })

        return normalize_segments(results, merge_sentences, merge_short_sentences)

    except Exception as e:  # defensive: ASR output shape varies across models
        print(f"提取分段信息失败: {e}")
        return []


"""
语音识别具体实现逻辑
"""
def audio_asr(source):
    # 接收前端请求的语音数据并保存到本地
    localdir = f"upload/{datetime.now().strftime('%Y-%m-%d')}"
    os.makedirs(f"{config.setting.statics.path}/{localdir}", exist_ok=True)
    localfile = f"{localdir}/{uuid.uuid4()}"

    # 支持Base64编码的语音数据和语音文件的URL地址
    fetch_file(source, f"{config.setting.statics.path}/{localfile}")

    # 模型推理，进行语音数据识别
    res = model.generate(
        input=f"{config.setting.statics.path}/{localfile}",
        batch_size_s=300,
        hotword='Obsidian',
    )

    # 获取语音识别结果并返回
    result = rich_transcription_postprocess(res[0]["text"].replace(" ",""))

    return result


"""
对外暴露的语音识别接口，接收上传的语音数据二进制文件
"""
@router.post("/audio_to_text")
def audio_to_text(audio: UploadFile):
    # 是否合并句子
    merge_sentences = True
    # 是否合并短句子
    merge_short_sentences = True

    # 接收前端请求的语音数据并保存到本地
    localdir = f"statics/upload/{datetime.now().strftime('%Y-%m-%d')}/{uuid.uuid4()}"
    os.makedirs(localdir, exist_ok=True)
    localfile = f"{localdir}/{audio.filename}"

    # 将二进制的语音文件保存到本地
    with open(localfile, "wb") as f:
        f.write(audio.file.read())

    # 模型推理，进行语音数据识别
    res = model.generate(
        input=localfile,
        batch_size_s=300,
        hotword='Obsidian',
    )

    return res

    # 获取语音识别结果并返回
    # result = rich_transcription_postprocess(res[0]["text"].replace(" ",""))

    # return result


def do_audio_translations(audio_file, merge_sentences, merge_short_sentences):
    """Transcribe a local audio file and return text plus normalized segments."""
    # Run ASR inference on the local audio file.
    inference = model.generate(
        input=audio_file,
        batch_size_s=300,
        hotword='Obsidian',
    )

    # Convert the raw result into normalized, indexed segments.
    normalized = extract_segments_from_result(
        inference, merge_sentences, merge_short_sentences
    )

    # Full transcript with spaces removed, plus the segment breakdown.
    full_text = inference[0]["text"].replace(" ","")
    return {
        "status": "success",
        "text": full_text,
        "segments": normalized
    }


@router.post("/translations")
async def audio_translations(request: Request):
    data = await request.json()

    audio_url = data.get("audio_url")
    # 是否合并句子
    merge_sentences = True
    # 是否合并短句子
    merge_short_sentences = True

    audio_url_md5 = compute_path_md5(audio_url)

    audio_dir = f"{config.setting.statics.path}/process/audio/{audio_url_md5}"
    os.makedirs(audio_dir, exist_ok=True)

    audio_file = f"{config.setting.statics.path}/process/audio/{audio_url_md5}/audio.m4a"

    if not os.path.exists(audio_file):
        fetch_file(audio_url, audio_file)

    return do_audio_translations(audio_file, merge_sentences, merge_short_sentences)