# fastapi_deploy.py
import os
from io import BytesIO
from typing import Optional

import numpy as np
import paddle
import paddle.nn as nn
import paddlenlp
import uvicorn
from fastapi import FastAPI, File, UploadFile, Form
from PIL import Image
from pydantic import BaseModel

# Model definition. Must be kept in sync with the training code: the
# attribute names below determine the state_dict keys, so renaming any
# submodule would break loading the checkpoint via set_state_dict().
class MultiModalClassifier(nn.Layer):
    """Multimodal classifier that fuses ResNet50 image features with ERNIE
    text features and produces logits over `num_classes` categories."""

    def __init__(self, num_classes):
        super().__init__()
        # Image encoder: ResNet50 backbone with the classification head
        # replaced by Identity, so it emits the 2048-d pooled feature.
        self.image_encoder = paddle.vision.models.resnet50(pretrained=False)
        self.image_encoder.fc = nn.Identity()
        self.image_proj = nn.Linear(2048, 512)
        
        # Text encoder: pretrained ERNIE 1.0 (768-d hidden size),
        # projected down to match the 512-d image feature.
        self.text_encoder = paddlenlp.transformers.ErnieModel.from_pretrained('ernie-1.0')
        self.text_proj = nn.Linear(768, 512)
        
        # Fusion MLP over the concatenated (512 + 512 = 1024)-d features.
        self.fusion = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Dropout(0.5)
        )
        
        # Final linear head producing raw (unsoftmaxed) logits.
        self.classifier = nn.Linear(256, num_classes)
    
    def forward(self, image, input_ids, attention_mask):
        """Return (batch, num_classes) logits.

        Args:
            image: float tensor; assumed NCHW (batch, 3, H, W) as produced
                by preprocess_image() in this file -- TODO(review) confirm.
            input_ids: int64 token-id tensor (batch, seq_len).
            attention_mask: int64 mask tensor (batch, seq_len).
        """
        image_features = self.image_encoder(image)
        image_features = self.image_proj(image_features)
        
        # Index 1 of the ERNIE outputs -- presumably the pooled [CLS]
        # representation; verify against the paddlenlp ErnieModel docs.
        text_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask)
        text_features = text_outputs[1]
        text_features = self.text_proj(text_features)
        
        # Concatenate along the feature axis, fuse, then classify.
        fused_features = paddle.concat([image_features, text_features], axis=1)
        fused_features = self.fusion(fused_features)
        logits = self.classifier(fused_features)
        return logits

# Initialize the API application (title/description are user-facing and
# intentionally left in Chinese).
app = FastAPI(title="多模态分类服务", description="结合图像和文本的多模态分类API")

# Build the model and load trained weights. This runs at import time, so
# the process fails fast if the checkpoint file is missing.
model = MultiModalClassifier(num_classes=5)
model.set_state_dict(paddle.load("checkpoints/best_model.pdparams"))
model.eval()  # disable dropout for deterministic inference

# Tokenizer matching the ERNIE text encoder inside the model.
tokenizer = paddlenlp.transformers.ErnieTokenizer.from_pretrained('ernie-1.0')

# Class-id -> human-readable category name.
# NOTE(review): assumed to match the training label order -- confirm.
label_map = {
    0: "科技",  # technology
    1: "娱乐",  # entertainment
    2: "体育",  # sports
    3: "财经",  # finance
    4: "教育"   # education
}

# Image preprocessing: resize + ImageNet normalization -> NCHW tensor.
def preprocess_image(image):
    """Convert a PIL RGB image into a (1, 3, 224, 224) float32 paddle
    tensor normalized with the standard ImageNet mean/std."""
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    resized = image.resize((224, 224), Image.BICUBIC)
    pixels = np.asarray(resized, dtype='float32') / 255.0
    normalized = (pixels - mean) / std
    chw = normalized.transpose(2, 0, 1)  # HWC -> CHW
    return paddle.to_tensor(chw, dtype='float32').unsqueeze(0)

# Text preprocessing: tokenize to fixed-length id/mask tensors.
def preprocess_text(text, max_seq_len=128):
    """Tokenize `text` with the module-level ERNIE tokenizer.

    Returns:
        (input_ids, attention_mask): int64 paddle tensors of shape
        (1, max_seq_len), padded to the full sequence length.
    """
    encoded = tokenizer(
        text=text,
        max_seq_len=max_seq_len,
        pad_to_max_seq_len=True,
        return_attention_mask=True,
        return_token_type_ids=False
    )
    ids = paddle.to_tensor(encoded['input_ids'], dtype='int64').unsqueeze(0)
    mask = paddle.to_tensor(encoded['attention_mask'], dtype='int64').unsqueeze(0)
    return ids, mask

# Request schema (currently unused by /predict, which consumes multipart
# form data; kept for a JSON-based variant of the API).
class PredictionRequest(BaseModel):
    """JSON request body: classification text plus an optional image URL."""
    text: str
    # Optional[str] rather than a bare `str = None`: pydantic v2 rejects a
    # None default on a non-optional field at class-definition time.
    image_url: Optional[str] = None  # supports URL-based images as an alternative to file upload

# Response schema for /predict.
class PredictionResponse(BaseModel):
    # Predicted category name (one of the values of `label_map`).
    category: str
    # Softmax probability of the predicted category, in [0, 1].
    confidence: float
    # Full mapping of category name -> softmax probability.
    scores: dict

# Prediction endpoint: multipart form with a text field and an image file.
@app.post("/predict", response_model=PredictionResponse)
async def predict(
    text: str = Form(...),
    image: UploadFile = File(...)
):
    """Run the multimodal classifier on an uploaded image plus text.

    Returns the top category, its confidence, and per-category scores.
    """
    # Decode the upload into an RGB PIL image. Use a distinct local name so
    # the UploadFile parameter `image` is not shadowed by the decoded image.
    image_bytes = await image.read()
    pil_image = Image.open(BytesIO(image_bytes)).convert('RGB')
    image_tensor = preprocess_image(pil_image)

    # Tokenize the accompanying text.
    input_ids, attention_mask = preprocess_text(text)

    # Inference without autograd bookkeeping.
    with paddle.no_grad():
        logits = model(image_tensor, input_ids, attention_mask)
        probs = paddle.nn.functional.softmax(logits, axis=1).numpy()[0]

    # np.argmax returns a numpy integer; cast to a plain int so it is a
    # clean dict key and serializes naturally.
    pred_id = int(np.argmax(probs))
    pred_category = label_map[pred_id]
    confidence = float(probs[pred_id])

    # Per-category probabilities, keyed by human-readable name.
    scores = {label_map[i]: float(probs[i]) for i in range(len(probs))}

    return {
        "category": pred_category,
        "confidence": confidence,
        "scores": scores
    }

# Entry point: serve the API on all interfaces, port 8000.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)