import base64
import os
import time
from typing import Dict, List, Optional, Union

import torch
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from modelscope import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from pydantic import BaseModel
from qwen_vl_utils import process_vision_info

app = FastAPI()

# Allow cross-origin requests from any origin.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests — confirm this is intended.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Module-level globals holding the model and processor; both stay None until
# init_model() populates them lazily on the first request.
model = None
processor = None

class ImageURL(BaseModel):
    """Request schema for an image referenced by URL."""
    url: str

class ImageBase64(BaseModel):
    """Request schema for an image supplied inline as a base64 string."""
    base64: str

class Message(BaseModel):
    """One chat turn: a role plus either a plain string or a list of
    content parts (strings or {"type": ..., ...} dicts, as in the
    OpenAI multi-modal message format)."""
    role: str
    content: Union[str, List[Union[str, Dict[str, str]]]]

class ChatRequest(BaseModel):
    """Body of POST /v1/chat/completions (OpenAI-compatible subset)."""
    model: str
    messages: List[Message]
    max_tokens: Optional[int] = 128      # upper bound on newly generated tokens
    temperature: Optional[float] = 0.7   # sampling temperature

class ChatResponse(BaseModel):
    """OpenAI chat.completion response shape.

    NOTE(review): declared but not wired up as the endpoint's
    response_model — chat_completions returns a plain dict.
    """
    id: str
    object: str
    created: int
    model: str
    choices: List[Dict]
    usage: Dict[str, int]

def init_model():
    """Lazily load the Qwen2.5-VL processor and model into module globals.

    Safe to call on every request: once both globals are populated this is
    a no-op, so only the first caller pays the load cost.
    """
    global model, processor
    if model is not None and processor is not None:
        return

    print("正在初始化模型...")
    local_dir = os.path.expanduser(
        "~/.cache/modelscope/hub/models/Qwen/Qwen2.5-VL-7B-Instruct"
    )

    print("加载模型中...")
    processor = AutoProcessor.from_pretrained(local_dir)

    loaded = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        local_dir,
        torch_dtype="auto",
        device_map="auto",
    )
    # Inference only: switch off dropout etc. before publishing the global.
    model = loaded.eval()

    print("模型加载完成！")

def process_messages(messages: List["Message"]):
    """Normalize chat messages into the Qwen-VL content-part format.

    Each message becomes ``{"role": ..., "content": [parts]}`` where every
    part is either ``{"type": "text", "text": ...}`` or
    ``{"type": "image", "image": ...}``.

    - A plain-string ``content`` becomes a single text part.
    - Dict parts of type "image"/"text" are passed through (image parts
      with a falsy "image" value are dropped).
    - Bug fix: bare strings inside a content list (allowed by the Message
      type) were previously dropped silently; they are now kept as text
      parts.
    """
    processed_messages = []
    for msg in messages:
        if isinstance(msg.content, list):
            content_list = []
            for item in msg.content:
                if isinstance(item, dict):
                    if item.get("type") == "image":
                        # Use the "image" field value directly (URL, path, or data URI).
                        image_url = item.get("image")
                        if image_url:
                            content_list.append({
                                "type": "image",
                                "image": image_url
                            })
                    elif item.get("type") == "text":
                        content_list.append({
                            "type": "text",
                            "text": item.get("text", "")
                        })
                elif isinstance(item, str):
                    # Fix: keep bare string items as text parts instead of
                    # silently discarding them.
                    content_list.append({
                        "type": "text",
                        "text": item
                    })
            processed_messages.append({
                "role": msg.role,
                "content": content_list
            })
        else:
            # Plain-string content: wrap it as a single text part.
            processed_messages.append({
                "role": msg.role,
                "content": [{
                    "type": "text",
                    "text": msg.content
                }]
            })
    return processed_messages

@app.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    """OpenAI-compatible chat completions endpoint backed by Qwen2.5-VL.

    Normalizes the incoming messages, runs one generation pass over the
    (lazily initialized) model, and returns a ``chat.completion``-shaped
    dict. Any failure is surfaced as HTTP 500 with the error message.
    """
    try:
        init_model()

        # Normalize messages into the Qwen-VL content-part schema.
        print("收到的消息:", request.messages)
        processed_messages = process_messages(request.messages)
        print("处理后的消息:", processed_messages)

        # Build the chat-template prompt text.
        text = processor.apply_chat_template(
            processed_messages, tokenize=False, add_generation_prompt=True
        )
        print("处理后的文本:", text)

        image_inputs, video_inputs = process_vision_info(processed_messages)
        print("图片输入:", image_inputs)

        inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        )
        # Fix: follow the model's actual device instead of hard-coding "cuda"
        # (device_map="auto" may place the model elsewhere).
        inputs = inputs.to(model.device)

        # Fix: generate() ignores temperature unless sampling is enabled;
        # enable it only for a positive temperature (0 → greedy decoding).
        do_sample = request.temperature is not None and request.temperature > 0
        generated_ids = model.generate(
            **inputs,
            max_new_tokens=request.max_tokens,
            do_sample=do_sample,
            temperature=request.temperature if do_sample else None,
        )
        # Strip the prompt tokens so only newly generated tokens are decoded.
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        output_text = processor.batch_decode(
            generated_ids_trimmed,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False
        )[0]

        print("模型输出:", output_text)

        # Build the OpenAI-style response.
        # Fix: "created" must be a Unix timestamp — the original used
        # torch.cuda.current_stream().query(), which returns a bool.
        response = {
            "id": "chatcmpl-" + os.urandom(12).hex(),
            "object": "chat.completion",
            "created": int(time.time()),
            "model": request.model,
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": output_text
                },
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": len(inputs.input_ids[0]),
                "completion_tokens": len(generated_ids_trimmed[0]),
                "total_tokens": len(inputs.input_ids[0]) + len(generated_ids_trimmed[0])
            }
        }

        return response

    except Exception as e:
        print("发生错误:", str(e))
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    # Dev server with auto-reload; the "app_openai:app" import string must
    # match this file's module name for reload to work.
    uvicorn.run("app_openai:app", host="0.0.0.0", port=8000, reload=True) 