from flask import Flask, request, jsonify
import soundfile as sf
import torch
import io
import base64
import tempfile
import os
from datetime import datetime
from modelscope import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor
from qwen_omni_utils import process_mm_info

app = Flask(__name__)

# Global model and processor handles, loaded lazily by init_model() on the
# first request so that importing this module stays cheap.
model = None
processor = None

def init_model():
    """Lazily load the global model and processor (idempotent).

    Each global is checked independently so that a partial failure (e.g. the
    model loaded but the processor download raised) is retried on the next
    call instead of leaving ``processor`` as ``None`` forever — the original
    only checked ``model``.
    """
    global model, processor
    if model is None:
        model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
            "Qwen/Qwen2.5-Omni-3B",
            torch_dtype="auto",
            device_map="auto"
        )
    if processor is None:
        processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-3B")

# Temp-file suffix per media type, used when spilling base64 payloads to disk.
_MEDIA_SUFFIXES = {"audio": ".wav", "video": ".mp4"}


def _write_media_tempfile(raw_bytes, suffix):
    """Write raw media bytes to a named temp file and return its path.

    The file is created with ``delete=False`` because downstream loaders open
    it by path; the caller is responsible for removing it when done.
    """
    temp_file = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
    try:
        temp_file.write(raw_bytes)
    finally:
        temp_file.close()
    return temp_file.name


def process_content_item(content_item):
    """Normalize one message content item for the multimodal processor.

    Text items (and anything unrecognized or malformed) are returned
    unchanged.  Audio/video items carrying a base64 payload under their type
    key are decoded to a temp file and returned as a NEW dict pointing at that
    file path (callers can use object identity to detect that a temp file was
    created).

    Raises:
        binascii.Error: if the base64 payload is malformed (propagates to the
            route's error handler, matching the original behavior).
    """
    # .get() instead of ["type"]: a malformed item without "type" now falls
    # through to the pass-through return instead of raising KeyError.
    item_type = content_item.get("type")
    suffix = _MEDIA_SUFFIXES.get(item_type)
    if suffix is not None and item_type in content_item:
        raw_bytes = base64.b64decode(content_item[item_type])
        return {"type": item_type, item_type: _write_media_tempfile(raw_bytes, suffix)}
    return content_item

@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible chat completion endpoint.

    Accepts OpenAI-style ``messages`` whose content may be a plain string or
    a list of multimodal items (text / base64 audio / base64 video), runs the
    Qwen2.5-Omni model, and returns an OpenAI-shaped completion object.  If
    the model produced speech, it is attached as base64 WAV under
    ``choices[0].message.audio``.

    Returns 500 with an OpenAI-style error object on any failure.
    """
    # Temp media files decoded for this request; always removed in `finally`
    # (the original implementation leaked one file per multimodal item).
    temp_media_paths = []
    try:
        init_model()

        data = request.json
        messages = data.get('messages', [])
        # NOTE: 'stream' is accepted by clients for API compatibility but
        # streaming is not implemented; the full response is returned at once.
        use_audio_in_video = data.get('use_audio_in_video', True)

        # Convert OpenAI-style messages into the processor's conversation format.
        conversation = []
        for message in messages:
            role = message.get('role')
            content = message.get('content')

            if isinstance(content, str):
                # Plain-text message.
                processed_content = [{"type": "text", "text": content}]
            elif isinstance(content, list):
                # Multimodal message: decode any base64 media items to temp files.
                processed_content = []
                for item in content:
                    processed_item = process_content_item(item)
                    # A new dict means a temp media file was written; remember
                    # its path so it can be deleted after inference.
                    if processed_item is not item:
                        media_type = processed_item.get("type")
                        if media_type in ("audio", "video"):
                            temp_media_paths.append(processed_item[media_type])
                    processed_content.append(processed_item)
            else:
                # Anything else (numbers, None, ...) is stringified as text.
                processed_content = [{"type": "text", "text": str(content)}]

            conversation.append({
                "role": role,
                "content": processed_content
            })

        # Inject the default system prompt when the client supplied none.
        if not any(msg["role"] == "system" for msg in conversation):
            conversation.insert(0, {
                "role": "system",
                "content": [
                    {
                      "type": "text",
                      "text": """
                      你是一个康养照护情境中的异常行为预测发现系统，请你根据传入的数据预测异常行为。
请你仅返回如下格式的JSON数据
{
  "level": 3,
  "reason": "人物有明显的上肢下落倾向",
  "predict": "跌倒"
}
其中level中的1-3表示异常行为的严重等级
reason是即将发生异常行为的原因
predict是预测即将发生的异常行为
                      """
                    }
                ]
            })

        # Prepare model inputs (chat template text + extracted media tensors).
        text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
        audios, images, videos = process_mm_info(conversation, use_audio_in_video=use_audio_in_video)
        inputs = processor(
            text=text,
            audio=audios,
            images=images,
            videos=videos,
            return_tensors="pt",
            padding=True,
            use_audio_in_video=use_audio_in_video
        )
        inputs = inputs.to(model.device).to(model.dtype)
        # Prompt length, recorded before generation for usage accounting.
        prompt_tokens = int(inputs["input_ids"].shape[1])

        # Generate: the model returns token ids and (optionally) speech audio.
        with torch.no_grad():
            text_ids, audio = model.generate(**inputs, use_audio_in_video=use_audio_in_video)

        # Decode the generated text (first/only batch element).
        response_text = processor.batch_decode(
            text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]

        # Encode generated speech (if any) as base64 WAV at 24 kHz.
        audio_base64 = None
        if audio is not None:
            audio_numpy = audio.reshape(-1).detach().cpu().numpy()
            audio_buffer = io.BytesIO()
            sf.write(audio_buffer, audio_numpy, samplerate=24000, format='WAV')
            audio_buffer.seek(0)
            audio_base64 = base64.b64encode(audio_buffer.read()).decode('utf-8')

        # Usage accounting: `text_ids` holds prompt + completion, so the
        # completion length is total minus prompt (the original reported the
        # full sequence length for both fields and double-counted the total).
        total_tokens = len(text_ids[0]) if text_ids is not None else 0
        completion_tokens = max(total_tokens - prompt_tokens, 0)

        response = {
            "id": f"chatcmpl-{int(datetime.now().timestamp())}",
            "object": "chat.completion",
            "created": int(datetime.now().timestamp()),
            "model": "Qwen2.5-Omni-3B",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response_text
                    },
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": total_tokens
            }
        }

        # Attach generated speech in the OpenAI audio-output format.
        if audio_base64:
            response["choices"][0]["message"]["audio"] = {
                "format": "wav",
                "data": audio_base64
            }

        return jsonify(response)

    except Exception as e:
        # Boundary handler: surface any failure as an OpenAI-style 500 error.
        return jsonify({
            "error": {
                "message": str(e),
                "type": "internal_error",
                "code": "internal_error"
            }
        }), 500
    finally:
        # Clean up the temp media files decoded for this request.
        for path in temp_media_paths:
            try:
                os.remove(path)
            except OSError:
                pass

@app.route('/v1/models', methods=['GET'])
def list_models():
    """Return the single served model in OpenAI ``/v1/models`` list format."""
    created_ts = int(datetime.now().timestamp())
    model_entry = {
        "id": "Qwen2.5-Omni-3B",
        "object": "model",
        "created": created_ts,
        "owned_by": "qwen",
        "permission": [],
        "root": "Qwen2.5-Omni-3B",
        "parent": None
    }
    return jsonify({"object": "list", "data": [model_entry]})

@app.route('/health', methods=['GET'])
def health_check():
    """Liveness probe: always reports the service as healthy."""
    payload = {"status": "healthy"}
    return jsonify(payload)

# Script entry point: run the Flask development server on all interfaces.
# NOTE(review): for production use, serve via a WSGI server (gunicorn/uwsgi)
# instead of app.run().
if __name__ == '__main__':
    print("正在启动Qwen2.5-Omni API服务器...")  # "Starting the Qwen2.5-Omni API server..."
    app.run(host='0.0.0.0', port=8000, debug=False)
