import os
import sys
from pathlib import Path
from fastapi import FastAPI, WebSocket, Query, Body, APIRouter
from whisper_asr import WhisperASR
from analyze_audio_emotion import RobustAudioEmotionAnalyzer
from stress_detector import StressDetector
from pydantic import BaseModel
from typing import List, Any
import json
from src.utils.convert_multimodal_input_util import convert_multimodal_input
from multimodal.predictor import InterviewPredictor
from src.service.extract_tech_stack.predictor import TechStackPredictor, jieba_tokenizer
from fastapi.responses import JSONResponse

# Resolve project-root-relative paths in an OS-independent way.
def get_project_root():
    """Locate the project root directory.

    The original comment documented "walk upward until the directory that
    contains ``model/``", but the code hardcoded three ``.parent`` hops.
    This implements the documented search, falling back to the historical
    fixed layout (src/service -> src -> project_root) when no ancestor
    contains a ``model`` directory, so existing deployments are unaffected.
    """
    current_file = Path(__file__).resolve()
    for candidate in current_file.parents:
        if (candidate / "model").is_dir():
            return candidate
    # Fallback: fixed src/service -> src -> project_root layout.
    return current_file.parent.parent.parent

# Unified helper for model paths, portable across operating systems.
def get_model_path(model_name):
    """Return the absolute path of *model_name* under ``<root>/model`` as a str."""
    return str(get_project_root() / "model" / model_name)

app = FastAPI()

# Force the Hugging Face libraries into offline mode so model loading never
# attempts network access (all weights ship with the project).
for _offline_flag in ("HF_DATASETS_OFFLINE", "TRANSFORMERS_OFFLINE", "HF_HUB_OFFLINE"):
    os.environ[_offline_flag] = "1"

# Request payload for the /multimodal/scoring endpoint.
class InterviewContent(BaseModel):
    # Speech-rate samples; element shape is defined by the caller — TODO confirm.
    speechRate: List[Any]
    # Audio emotion labels (presumably one per analyzed window — verify against caller).
    audioEmotion: List[str]
    # Facial action observations from the vision pipeline (free-form entries).
    facialActions: List[Any]
    # Body movement observations from the vision pipeline (free-form entries).
    bodyMovements: List[Any]
    # Micro-expression labels.
    microExpression: List[str]
    # Stress-detection labels.
    stress: List[str]

# Load the audio-emotion model once at startup (CPU only). On failure the
# service keeps running; the endpoint reports the model as unavailable.
emotion_analyzer = None
try:
    emotion_analyzer = RobustAudioEmotionAnalyzer(
        model_path=get_model_path("w2v2-L-robust"),
        window_size=5,
        chunk_size=1.5,
    )
    print("情感分析模型加载成功")
except Exception as e:
    print(f"情感分析模型加载失败: {str(e)}")

# Load the stress-detection model once at startup (CPU only).
# NOTE(review): the camelCase `stressDetector` name is kept as-is because
# the endpoints below reference it directly.
stressDetector = None
try:
    stressDetector = StressDetector()
    print("压力检测模型加载成功")
except Exception as e:
    print(f"压力检测模型加载失败: {str(e)}")

# Build the Whisper ASR engine (the only component allowed to use the GPU).
def init_asr_engine():
    """Construct the Whisper ASR engine; return None when loading fails."""
    try:
        engine = WhisperASR(
            model_path=get_model_path("faster-whisper-medium")
        )
    except Exception as e:
        print(f"ASR语音识别模型加载失败: {str(e)}")
        return None
    print("ASR语音识别模型加载成功")
    return engine

asr_engine = init_asr_engine()

# Load the multimodal interview-scoring model once at startup.
try:
    # Path expression inlined: the original left a throwaway `model_path`
    # global in the module namespace, a generic name ripe for accidental
    # reuse/shadowing later in the file.
    interview_predictor = InterviewPredictor(
        model_path=get_model_path("scoring_model.pth")
    )
    print("多模态评分模型加载成功")
except Exception as e:
    print(f"多模态评分模型加载失败: {str(e)}")
    interview_predictor = None

# Load the tech-stack extraction model once at startup.
try:
    tech_stack_model_path = get_model_path("tech_stack_model")

    # Pickle-compatibility workaround:
    # When the model was saved, the `jieba_tokenizer` function was recorded
    # as belonging to the '__main__' module. When this server_api.py runs as
    # the main program, we must manually inject the imported `jieba_tokenizer`
    # into the '__main__' namespace so that pickle can resolve it while
    # unpickling the model.
    sys.modules['__main__'].jieba_tokenizer = jieba_tokenizer

    tech_stack_predictor = TechStackPredictor(model_dir=tech_stack_model_path)
    print("技术栈提取模型加载成功")
except Exception as e:
    print(f"技术栈提取模型加载失败: {str(e)}")
    tech_stack_predictor = None

# Multimodal scoring endpoint.
@app.post("/multimodal/scoring")
async def analyze_interview_data(content: InterviewContent):
    """Score an interview from aggregated multimodal observations.

    Returns ``{"score": <prediction>}`` on success, or a 500 JSON error
    payload when the model is unavailable or prediction fails.

    Fix: removed the bare ``print(content)`` that dumped the entire
    (potentially sensitive) request payload to stdout on every call.
    """
    try:
        if interview_predictor is None:
            return JSONResponse(
                content={"error": "多模态评分模型未成功加载，无法进行评分"},
                status_code=500
            )

        # Convert the pydantic payload into the feature layout the
        # predictor expects, then run inference.
        converted_data = convert_multimodal_input(content.model_dump())
        multimodal_score = interview_predictor.predict(converted_data)

        return {"score": multimodal_score}
    except Exception as e:
        error_msg = f"多模态评分异常: {str(e)}"
        print(error_msg)
        return JSONResponse(
            content={"error": error_msg},
            status_code=500
        )


# Stress ("lie") detection endpoint.
@app.get("/stress/detection")
def analyze_audio(path: str = Query(..., description="用户音频网络链接")):
    """Download the audio at *path* and run stress detection on it.

    Returns the detector's result dict on success, or ``{"detail": ...}``
    describing the failure (model missing, download error, bad audio).
    The response shapes are unchanged from the original implementation.
    """
    import requests
    import tempfile
    from urllib.parse import urlparse

    if stressDetector is None:
        return {"detail": "压力检测模型未成功加载，无法进行分析"}

    temp_file_path = None
    try:
        # NOTE(review): this fetches an arbitrary caller-supplied URL (SSRF
        # risk if the service is internet-exposed); restrict allowed hosts
        # upstream.
        response = requests.get(path, timeout=30)
        response.raise_for_status()  # surface HTTP errors as RequestException

        # Keep the original extension so downstream decoders can sniff the
        # container format; default to .webm when the URL has none.
        extension = os.path.splitext(os.path.basename(urlparse(path).path))[1] or '.webm'

        # Fix: NamedTemporaryFile creates the file atomically with a unique
        # name (O_EXCL), replacing the race-prone manual uuid-named path.
        # delete=False because the detector reopens the file by path; the
        # finally block removes it.
        with tempfile.NamedTemporaryFile(
            prefix="stress_audio_", suffix=extension, delete=False
        ) as temp_file:
            temp_file_path = temp_file.name
            temp_file.write(response.content)

        result = stressDetector.analyze(temp_file_path)
        if not result:
            return {"detail": "分析失败，可能音频格式错误或特征提取失败"}
        print(f"压力检测结果：{result}")
        return result

    except requests.exceptions.RequestException as e:
        error_msg = f"下载音频文件失败：{str(e)}"
        print(f"------------压力检测下载异常：{error_msg}")
        return {"detail": error_msg}
    except Exception as e:
        error_msg = f"压力检测异常: {str(e)}"
        print(f"------------压力检测异常：{error_msg}")
        return {"detail": error_msg}
    finally:
        # Best-effort cleanup of the downloaded temp file.
        if temp_file_path and os.path.exists(temp_file_path):
            try:
                os.remove(temp_file_path)
                print(f"已清理压力检测临时音频文件: {temp_file_path}")
            except Exception as e:
                print(f"清理临时文件失败: {e}")

# Audio emotion analysis endpoint.
@app.get("/analyze-emotion")
def analyze_emotion(path: str = Query(..., description="用户音频网络链接")):
    """Download the audio at *path* and analyze its emotional content.

    Returns JSON with the dominant ``emotion`` plus a time-ordered
    ``emotion_sequence``; on failure both are null and ``detail`` explains
    the error (400 for download failures, 500 otherwise). Response shapes
    are unchanged from the original implementation.
    """
    import requests
    import tempfile
    from urllib.parse import urlparse

    if emotion_analyzer is None:
        return JSONResponse(content={
            "emotion": None,
            "emotion_sequence": None,
            "detail": "情感分析模型未成功加载，无法进行分析"
        }, status_code=500)

    temp_file_path = None
    try:
        # NOTE(review): fetches an arbitrary caller-supplied URL (SSRF risk
        # if the service is internet-exposed); restrict allowed hosts
        # upstream.
        response = requests.get(path, timeout=30)
        response.raise_for_status()  # surface HTTP errors as RequestException

        # Keep the original extension so downstream decoders can sniff the
        # container format; default to .webm when the URL has none.
        extension = os.path.splitext(os.path.basename(urlparse(path).path))[1] or '.webm'

        # Fix: NamedTemporaryFile creates the file atomically with a unique
        # name (O_EXCL), replacing the race-prone manual uuid-named path.
        # delete=False because the analyzer reopens the file by path; the
        # finally block removes it.
        with tempfile.NamedTemporaryFile(
            prefix="audio_", suffix=extension, delete=False
        ) as temp_file:
            temp_file_path = temp_file.name
            temp_file.write(response.content)

        # Full analysis: dominant emotion label plus the per-window sequence.
        result = emotion_analyzer.process_audio(temp_file_path)

        if not result or "emotion" not in result:
            return JSONResponse(content={
                "emotion": None,
                "emotion_sequence": None,
                "detail": "未识别到有效的音频情感"
            })

        return JSONResponse(content={
            "emotion": result.get("emotion"),
            "emotion_sequence": result.get("emotion_sequence"),
        })

    except requests.exceptions.RequestException as e:
        error_msg = f"下载音频文件失败：{str(e)}"
        print(error_msg)
        return JSONResponse(
            content={
                "emotion": None,
                "emotion_sequence": None,
                "detail": error_msg
            },
            status_code=400
        )
    except Exception as e:
        error_msg = f"情感分析异常：{str(e)}"
        print(error_msg)
        return JSONResponse(
            content={
                "emotion": None,
                "emotion_sequence": None,
                "detail": error_msg
            },
            status_code=500
        )
    finally:
        # Best-effort cleanup of the downloaded temp file.
        if temp_file_path and os.path.exists(temp_file_path):
            try:
                os.remove(temp_file_path)
                print(f"已清理临时音频文件: {temp_file_path}")
            except Exception as e:
                print(f"清理临时文件失败: {e}")

# Streaming speech-recognition endpoint.
@app.websocket("/ASR")
async def asr_websocket(websocket: WebSocket):
    """WebSocket ASR: each client message is JSON ``{"path": <audio URL>}``.

    For every request the recognized segments are streamed back one frame
    per segment (``code=1``), followed by a summary frame (``code=2``) with
    the overall speech rate and the concatenated transcript. Errors are
    reported with ``code=-1``.

    Fix: malformed JSON or a missing ``path`` previously raised out of the
    receive loop and silently killed the connection; they are now reported
    to the client and the loop continues.
    """
    import requests
    import tempfile
    import uuid
    from urllib.parse import urlparse

    await websocket.accept()

    if asr_engine is None:
        await websocket.send_text(json.dumps({
            "error": "ASR语音识别模型未成功加载，无法进行识别",
            "code": -1
        }))
        return

    try:
        while True:
            message = await websocket.receive_text()

            try:
                data = json.loads(message)
            except json.JSONDecodeError as e:
                await websocket.send_text(json.dumps({
                    "error": f"语音识别异常：无效的JSON消息: {str(e)}",
                    "code": -1
                }))
                continue
            path = data.get("path")
            if not path:
                await websocket.send_text(json.dumps({
                    "error": "语音识别异常：缺少path字段",
                    "code": -1
                }))
                continue

            temp_file_path = None
            try:
                # Download the remote audio to a local temp file.
                response = requests.get(path, timeout=30)
                response.raise_for_status()  # surface HTTP errors

                # Keep the original extension so decoders can sniff the
                # container format; default to .webm when the URL has none.
                parsed_url = urlparse(path)
                original_filename = os.path.basename(parsed_url.path)
                file_extension = os.path.splitext(original_filename)[1] or '.webm'

                temp_dir = tempfile.gettempdir()
                temp_filename = f"asr_audio_{uuid.uuid4().hex}{file_extension}"
                temp_file_path = os.path.join(temp_dir, temp_filename)

                with open(temp_file_path, 'wb') as temp_file:
                    temp_file.write(response.content)

                result = asr_engine.transcribe(temp_file_path)

                # Stream each recognized segment as its own frame while
                # accumulating the full transcript.
                full_text = ""
                for seg in result["segments"]:
                    full_text += seg["text"]
                    await websocket.send_text(json.dumps({
                        "language": result["language"],
                        "text": seg["text"],
                        "code": 1
                    }))

                # Final frame: end-of-recognition marker with overall speech
                # rate and the concatenated transcript.
                await websocket.send_text(json.dumps({
                    "language": result["language"],
                    "text": "",
                    "overallSpeed": result["overall_speed"],
                    "speedUnit": result["speed_unit"],
                    "fullText": full_text,
                    "code": 2
                }))

            except requests.exceptions.RequestException as e:
                await websocket.send_text(json.dumps({
                    "error": f"下载音频文件失败：{str(e)}",
                    "code": -1
                }))
            except Exception as e:
                await websocket.send_text(json.dumps({
                    "error": f"语音识别异常：{str(e)}",
                    "code": -1
                }))
            finally:
                # Best-effort cleanup of the downloaded temp file.
                if temp_file_path and os.path.exists(temp_file_path):
                    try:
                        os.remove(temp_file_path)
                        print(f"已清理ASR临时音频文件: {temp_file_path}")
                    except Exception as e:
                        print(f"清理临时文件失败: {e}")

    except Exception as e:
        # Typically the client disconnecting; log and let the socket close.
        print("WebSocket error:", e)

# Tech-stack extraction endpoint.
@app.post("/extract-tech-stack")
def extract_tech_stack(questions: List[str] = Body(...), return_scores: bool = Query(True)):
    """Extract the technology stack mentioned in each interview question.

    Args:
        questions: List of interview questions (request body).
        return_scores: Whether to include per-stack confidence scores.

    Returns:
        ``{"results": [...]}`` with one entry per question carrying the
        extracted tech stacks (and, when requested, their scores).
    """
    try:
        if tech_stack_predictor is None:
            return JSONResponse(
                content={"error": "技术栈提取模型未成功加载，无法进行提取"},
                status_code=500
            )

        # One loop for both modes; the per-item shape differs only by the
        # optional "scores" key.
        results = []
        for question in questions:
            if return_scores:
                tech_stacks, scores = tech_stack_predictor.predict(
                    question, return_scores=True
                )
                entry = {
                    "question": question,
                    "tech_stacks": tech_stacks,
                    "scores": scores
                }
            else:
                tech_stacks = tech_stack_predictor.predict(
                    question, return_scores=False
                )
                entry = {
                    "question": question,
                    "tech_stacks": tech_stacks
                }
            results.append(entry)

        return {"results": results}

    except Exception as e:
        error_msg = f"技术栈提取异常: {str(e)}"
        print(error_msg)
        return JSONResponse(
            content={"error": error_msg},
            status_code=500
        )


if __name__ == "__main__":
    import uvicorn

    # Startup banner: per-model load status followed by the route summary.
    print("=== AI面试后端服务启动 ===")
    print("模型加载状态:")
    for label, instance in (
        ("情感分析模型", emotion_analyzer),
        ("压力检测模型", stressDetector),
        ("ASR语音识别模型", asr_engine),
        ("多模态评分模型", interview_predictor),
        ("技术栈提取模型", tech_stack_predictor),
    ):
        print(f"- {label}: {'✓' if instance else '✗'}")
    print()
    print("支持的接口:")
    print("- GET  /analyze-emotion?path=<音频URL>     情感分析")
    print("- GET  /stress/detection?path=<音频URL>    压力检测")
    print("- WebSocket /ASR                          语音识别")
    print("- POST /multimodal/scoring                多模态评分")
    print("- POST /extract-tech-stack                技术栈提取")
    print()
    print("- 服务地址: http://0.0.0.0:8765")
    print("- API文档: http://0.0.0.0:8765/docs")
    print("=" * 50)

    uvicorn.run(app, host="0.0.0.0", port=8765)