import base64
import io
import torch
from PIL import Image
from typing import List

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field

import cn_clip.clip as clip
from cn_clip.clip import load_from_name

# --- One-time model initialization (runs at import time) ---
print("正在初始化模型和预处理器...")
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"使用设备: {device}")

MODEL_NAME = "ViT-H-14"
DOWNLOAD_ROOT = './models'

try:
    # Load the Chinese-CLIP checkpoint plus its matching image preprocessor.
    model, preprocess = load_from_name(MODEL_NAME, device=device, download_root=DOWNLOAD_ROOT, use_modelscope=True)
    model.eval()  # inference mode: freezes dropout / batch-norm statistics
    print(f"模型 '{MODEL_NAME}' 加载成功！")
    # Probe the embedding dimensionality with a dummy 224x224 RGB image.
    dummy_image = preprocess(Image.new('RGB', (224, 224))).unsqueeze(0).to(device)
    with torch.no_grad():
        embedding_dim = model.encode_image(dummy_image).shape[1]
    print(f"图片向量维度: {embedding_dim}")
except Exception as e:
    print(f"模型加载失败: {e}")
    # Fix: the original called exit(), which (a) is a site-module convenience
    # builtin that may not exist under `python -S`, and (b) with no argument
    # exits with status 0, so process supervisors would see a *successful*
    # exit after a fatal load failure. SystemExit(1) reports the error.
    raise SystemExit(1)

# FastAPI application instance; the metadata below feeds the auto-generated
# OpenAPI/Swagger documentation served at /docs.
app = FastAPI(
    title="Chinese CLIP Embedding API",
    description="一个基于 Chinese-CLIP 模型的图片 Embedding 生成服务。",
    version="1.0.0"
)

class EmbeddingRequest(BaseModel):
    """Request body for POST /v1/embeddings: one base64-encoded image.

    A ``data:image/...;base64,`` URI prefix is accepted and stripped by the
    endpoint handler.
    """
    input: str = Field(..., description="Base64 编码的图片字符串")

class EmbeddingData(BaseModel):
    """A single embedding item, mirroring the OpenAI embeddings API shape."""
    object: str = "embedding"  # fixed object-type tag
    embedding: List[float]     # L2-normalized image feature vector
    index: int                 # position of this item within the response list

class Usage(BaseModel):
    """Token-usage stub kept for OpenAI API compatibility.

    Images have no prompt tokens; the endpoint reports the embedding length
    as ``total_tokens``.
    """
    prompt_tokens: int = 0
    total_tokens: int

class EmbeddingResponse(BaseModel):
    """Top-level response envelope, mirroring the OpenAI embeddings API."""
    object: str = "list"       # fixed object-type tag
    data: List[EmbeddingData]  # one item per input image (always one here)
    model: str                 # name of the model that produced the vectors
    usage: Usage

@app.post("/v1/embeddings", response_model=EmbeddingResponse)
def create_embeddings(request: EmbeddingRequest):
    """Compute a normalized CLIP embedding for one base64-encoded image.

    Returns an OpenAI-style embeddings response with a single item.
    Raises HTTP 400 when the input cannot be decoded into an image and
    HTTP 500 when model inference fails.
    """
    image_b64 = request.input
    try:
        # Strip an optional data URI prefix (e.g. "data:image/png;base64,").
        if ',' in image_b64:
            _, encoded = image_b64.split(',', 1)
        else:
            encoded = image_b64
        # binascii.Error (raised by b64decode) subclasses ValueError, and
        # PIL's UnidentifiedImageError subclasses OSError, so the pair below
        # covers both decode failures without reaching into base64.binascii.
        image = Image.open(io.BytesIO(base64.b64decode(encoded)))
        # Fix: force 3-channel RGB. Grayscale / RGBA / palette / CMYK inputs
        # would otherwise crash the preprocess normalization (3 channels
        # expected) and surface as a 500 instead of succeeding.
        image = image.convert('RGB')
    except (ValueError, OSError) as e:
        raise HTTPException(status_code=400, detail=f"无法解码图片: {e}")
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"处理图片时发生未知错误: {e}")
    try:
        image_tensor = preprocess(image).unsqueeze(0).to(device)
        with torch.no_grad():
            image_features = model.encode_image(image_tensor)
            # L2-normalize so dot products between embeddings are cosine
            # similarities.
            image_features /= image_features.norm(dim=-1, keepdim=True)
        # Batch size is 1: take row 0 and convert to a plain Python list for
        # JSON serialization (no numpy round-trip needed).
        embedding_list = image_features[0].cpu().tolist()
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"模型推理失败: {e}")
    return EmbeddingResponse(
        data=[EmbeddingData(embedding=embedding_list, index=0)],
        model=MODEL_NAME,
        # Images have no real token count; report the vector length so
        # clients always see a non-zero usage figure.
        usage=Usage(total_tokens=len(embedding_list)),
    )

if __name__ == "__main__":
    # Development entrypoint: serve on all interfaces at port 8000.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
