# 分析、总结相关的API
import json
from io import BytesIO
from typing import Annotated, Dict, List

import aiofiles
from fastapi import APIRouter, UploadFile, File, Form, HTTPException
from fastapi.params import Path
from fastapi.responses import FileResponse, StreamingResponse
from pydub import AudioSegment
from sqlalchemy.orm import Session

from core.config import audio_root, report_root
from models import engine, MessageAnalysis, ChatAnalysis
from prompts.analyzer import *
from schemas.analyzer import *
from services.docx_generate import generate_docx_report
from services.existential_check import get_message, get_analysis, get_chat, get_summary
from services.global_analyzer import get_messages_and_analyses
from services.openai_service import stream_generate, generate
from services.stream_output import stream
from services.xunfei_ise import evaluate

router = APIRouter()


@router.get(
    path="/language-use/analysis/{message_id}/stream",
    summary="获取语言使用分析（流式）",
    tags=["analysis"],
    response_class=StreamingResponse,
)
async def get_language_use_analysis(message_id: Annotated[str, Path()]):
    """Stream the language-use analysis of a message as server-sent events.

    Replays the cached analysis when one is stored on the message;
    otherwise prompts the LLM and streams the generation live.
    """
    analysis = get_analysis(message_id)

    # Cached result: replay it through the same SSE channel.
    if analysis.language_use_analysis is not None:
        return StreamingResponse(
            stream(analysis.language_use_analysis),
            media_type="text/event-stream",
        )

    # No cache yet: ask the LLM and forward its stream.
    prompt = prompt_for_language_use_analysis(analysis.message.content)
    return StreamingResponse(
        content=stream_generate([{"role": "user", "content": prompt}]),
        media_type="text/event-stream",
    )


@router.get(
    path="/language-use/analysis/{message_id}",
    summary="获取语言使用分析",
    tags=["analysis"],
    response_model=GetAnalysisResponse,
)
async def get_language_use_analysis(message_id: Annotated[str, Path()]):
    """Return the language-use analysis of a message as one response.

    Serves the cached analysis when present; otherwise generates it
    synchronously via the LLM.

    NOTE(review): this def shadows the streaming variant of the same name
    at module level (linter F811). FastAPI registers both routes at
    decoration time, so both endpoints work; renaming one would affect
    generated operation ids — confirm before changing.
    """
    analysis = get_analysis(message_id)

    if analysis.language_use_analysis is not None:
        content = analysis.language_use_analysis
    else:
        prompt = prompt_for_language_use_analysis(analysis.message.content)
        content = generate(messages=[{"role": "user", "content": prompt}])

    return GetAnalysisResponse(category="language_use", content=content)


@router.get(
    path="/language-use/summary/{chat_id}",
    summary="获取语言使用分析的总结",
    tags=["summary"],
    response_class=StreamingResponse,
)
def get_language_use_summary(
    chat_id: Annotated[str, Path()],
):
    """Stream a chat-wide summary of all per-message language-use analyses.

    Raises:
        HTTPException: 404 when no user message in the chat carries a
            language-use analysis yet.
    """
    all_messages = get_messages_and_analyses(chat_id=chat_id)

    # Keep only user turns that already carry a language-use analysis.
    analysed: List[Dict[str, str]] = [
        {
            "content": m.content,
            "language_use_analysis": m.language_use_analysis,
        }
        for m in all_messages
        if m.role == "user" and m.language_use_analysis is not None
    ]
    if not analysed:
        raise HTTPException(status_code=404, detail="Message not found")

    payload = json.dumps(analysed, ensure_ascii=False)
    llm_messages = [
        {"role": "user", "content": prompt_for_language_use_summary(payload)}
    ]
    return StreamingResponse(
        stream_generate(llm_messages, sentence_split=False),
        media_type="text/event-stream",
    )


@router.post(path="/pronunciation/evaluation", summary="评估发音", tags=["analysis"])
async def upload_pronunciation(
    message_id: Annotated[str, Form(description="Message的ID，为UUID格式。")],
    audio: Annotated[UploadFile, File()],
):
    """Evaluate the pronunciation of an uploaded recording against a message.

    Converts the upload to 16 kHz / 16-bit / mono raw PCM, writes it under
    ``audio_root``, sends it to the Xunfei ISE service together with the
    message text, and stores the raw score on the message's analysis record.

    Raises:
        HTTPException: 500 when the evaluation service returns nothing.
    """
    message = get_message(message_id)

    audio_bytes = await audio.read()

    # assumes the client uploads webm audio — TODO confirm against the frontend
    sound = AudioSegment.from_file(BytesIO(audio_bytes), format="webm")
    # Normalize to 16 kHz, 16-bit (sample_width=2), mono before export.
    sound = sound.set_frame_rate(16000).set_sample_width(2).set_channels(1)

    # Export as raw PCM data
    pcm_data = sound.raw_data
    pcm_audio_path = audio_root / f"{message_id}.pcm"
    # Persist the PCM file so the evaluation service can read it from disk
    async with aiofiles.open(pcm_audio_path, mode="wb") as f:
        await f.write(pcm_data)

    pronunciation_evaluation = await evaluate(
        pcm_address=pcm_audio_path, text=message.content
    )
    if not pronunciation_evaluation:
        raise HTTPException(status_code=500, detail="发音评估失败")
    result = pronunciation_evaluation["read_sentence"]["rec_paper"]

    # save to db
    if message.analysis:
        analysis = message.analysis
    else:
        analysis = MessageAnalysis(message_id=message.id)

    analysis.pronunciation_score = str(result)

    # NOTE(review): when an analysis already exists it was loaded in another
    # session (inside get_message); committing this fresh session without
    # add/merge may not persist the updated score — verify persistence.
    with Session(engine) as session:
        if not message.analysis:
            session.add(analysis)
        session.commit()

    return {"result": result}


@router.get(
    path="/pronunciation/analysis/{message_id}/stream",
    summary="获取发音分析（流式）",
    tags=["analysis"],
    response_class=StreamingResponse,
)
async def get_pronunciation_analysis(
    message_id: Annotated[str, Path(description="Message的ID，为UUID格式。")],
):
    """Stream the pronunciation analysis of a message as server-sent events.

    A cached analysis is replayed verbatim; otherwise the stored
    pronunciation score is fed to the LLM and its output is streamed live.
    """
    analysis = get_analysis(message_id)

    cached = analysis.pronunciation_analysis
    if cached:
        body = stream(content=cached)
    else:
        prompt = prompt_for_pronunciation_analysis(analysis.pronunciation_score)
        body = stream_generate([{"role": "user", "content": prompt}])

    return StreamingResponse(content=body, media_type="text/event-stream")


@router.get(
    path="/pronunciation/analysis/{message_id}",
    summary="获取发音分析",
    tags=["analysis"],
    response_model=GetAnalysisResponse,
)
async def get_pronunciation_analysis(
    message_id: Annotated[str, Path(description="Message的ID，为UUID格式。")],
):
    """Return the pronunciation analysis of a message as one response.

    Uses the cached analysis when available; otherwise generates it
    synchronously from the stored pronunciation score.
    """
    analysis = get_analysis(message_id)

    if not analysis.pronunciation_analysis:
        prompt = prompt_for_pronunciation_analysis(analysis.pronunciation_score)
        content = generate(messages=[{"role": "user", "content": prompt}])
    else:
        content = analysis.pronunciation_analysis

    return GetAnalysisResponse(category="pronunciation", content=content)


@router.post(path="/analysis", summary="保存分析", tags=["analysis"])
def save_pronunciation_analysis(request: PostAnalysisRequest):
    """Persist a generated analysis onto a message's analysis record.

    Creates a ``MessageAnalysis`` row for the message when none exists, then
    stores ``request.content`` under the field matching ``request.category``
    ("language_use" or "pronunciation").

    Returns:
        dict: ``{"status": 1, "analysis_id": <id>}``.

    NOTE(review): any other category value falls through silently and still
    reports success — presumably PostAnalysisRequest restricts the values;
    verify the schema.
    """
    message = get_message(request.message_id)
    if not message.analysis:
        analysis = MessageAnalysis(message_id=message.id)
    else:
        analysis = message.analysis

    if request.category == "language_use":
        analysis.language_use_analysis = request.content
    elif request.category == "pronunciation":
        analysis.pronunciation_analysis = request.content

    # Only a freshly created analysis object must be added to the session.
    # NOTE(review): an existing analysis was loaded in a different session;
    # this commit may not flush its modified fields — confirm persistence.
    with Session(engine) as session:
        if not message.analysis:
            session.add(analysis)

        session.commit()
        session.refresh(analysis)

    return {"status": 1, "analysis_id": analysis.id}


@router.get(
    path="/pronunciation/summary/{chat_id}",
    summary="获取发音分析的总结",
    tags=["summary"],
    response_class=StreamingResponse,
)
def get_global_analysis(
    chat_id: Annotated[str, Path()],
):
    """Stream a chat-wide summary of all per-message pronunciation analyses.

    Raises:
        HTTPException: 404 when no user message in the chat carries a
            pronunciation analysis yet.
    """
    chat_messages = get_messages_and_analyses(chat_id=chat_id)

    # Collect user turns that already have a pronunciation analysis.
    analysed = [
        {
            "content": m.content,
            "pronunciation_analysis": m.pronunciation_analysis,
        }
        for m in chat_messages
        if m.role == "user" and m.pronunciation_analysis is not None
    ]

    if not analysed:
        raise HTTPException(status_code=404, detail="Message not found")

    prompt = prompt_for_pronunciation_summary(
        json.dumps(analysed, indent=2, ensure_ascii=False)
    )
    return StreamingResponse(
        stream_generate(
            [{"role": "user", "content": prompt}], sentence_split=False
        ),
        media_type="text/event-stream",
    )


@router.get(
    path="/communication-skill/summary/{chat_id}",
    summary="获取沟通技能分析的总结",
    tags=["summary"],
    response_class=StreamingResponse,
)
def get_communication_skill_summary(
    chat_id: Annotated[str, Path()],
):
    """Stream an LLM summary of the student's communication skills.

    Renders the whole chat as a "学生:/外语陪练:" transcript and feeds it to
    the communication-skill summary prompt, mirroring the language-use and
    pronunciation summary endpoints.

    Fix: the transcript was previously built but never passed to the prompt
    (the prompt name was referenced without being called), so the LLM had no
    conversation content to summarize.
    """
    messages = get_messages_and_analyses(chat_id=chat_id)

    # Render the conversation as a plain-text transcript.
    transcript_parts = []
    for message in messages:
        speaker = "学生" if message.role == "user" else "外语陪练"
        transcript_parts.append(f"{speaker}: {message.content}\n")
    transcript = "".join(transcript_parts)

    llm_messages = [
        {
            "role": "user",
            # The transcript is now actually supplied to the prompt builder.
            "content": prompt_for_communication_skill_summary(transcript),
        }
    ]
    return StreamingResponse(
        stream_generate(llm_messages, sentence_split=False),
        media_type="text/event-stream",
    )


@router.post(path="/summary", summary="保存总结", tags=["summary"])
def save_communication_skill_summary(request: PostSummaryRequest):
    """Persist the three chat-level summaries onto the chat's analysis record.

    Creates a ``ChatAnalysis`` row when the chat has none yet, then stores the
    communication-skill, language-use and pronunciation summaries from the
    request.

    Returns:
        dict: ``{"status": 1, "summary_id": <id>}``.
    """
    chat = get_chat(request.chat_id)
    if not chat.analysis:
        analysis = ChatAnalysis(chat_id=chat.id)
    else:
        analysis = chat.analysis

    analysis.communication_skill_analysis = request.communication_skill_analysis
    analysis.language_use_analysis = request.language_use_analysis
    analysis.pronunciation_analysis = request.pronunciation_analysis

    # NOTE(review): an existing analysis was loaded in a different session
    # (inside get_chat); committing this fresh session without add/merge may
    # not persist the updated fields — verify persistence.
    with Session(engine) as session:
        if chat.analysis is None:
            session.add(analysis)
        session.commit()
        session.refresh(analysis)
    return {"status": 1, "summary_id": analysis.id}


@router.get(
    path="/summary/{chat_id}/report.docx",
    summary="获取分析报告的 docx 文件",
    tags=["summary"],
    response_class=FileResponse,
)
def get_report_docx(chat_id: Annotated[str, Path()]):
    """Return the analysis report of a chat as a .docx download.

    Reuses an already-generated report on disk when present; otherwise
    builds one from the chat's summaries and per-message analyses.
    """
    analysis = get_summary(chat_id)
    chat = analysis.chat

    # Mode 1 is free conversation; any other mode uses the scenario prompt.
    situation = "自由对话" if chat.mode == 1 else chat.system_prompt
    per_message_reports = get_messages_and_analyses(chat_id=chat_id)

    # Serve the cached file when it exists; otherwise generate and keep it.
    report_path = report_root / f"{chat.id}.docx"
    if not report_path.exists():
        report_path = generate_docx_report(
            name=chat.id,
            created_at=chat.created,
            situation=situation,
            message_analyses=per_message_reports,
            language_use_analysis=analysis.language_use_analysis,
            pronunciation_analysis=analysis.pronunciation_analysis,
            communication_skill_analysis=analysis.communication_skill_analysis,
        )

    return FileResponse(
        path=str(report_path),
        filename="report.docx",
        media_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    )