import torch
from PIL import Image
import cn_clip.clip as clip
from cn_clip.clip import load_from_name, available_models
import numpy
import requests
from fastapi import FastAPI
import json
from io import BytesIO
import uvicorn

# Run on GPU when available; CN-CLIP supports both devices transparently.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Default backbone is RN50; weights are cached under ./models on first run.
# NOTE(review): model download/load happens at import time — startup blocks
# until weights are available.
model, preprocess = load_from_name("RN50", device=device, download_root='./models' )

app = FastAPI()

def success(data):
    """Wrap *data* in the standard success envelope (status=1)."""
    return {
        "status": 1,
        "data": data,
        "message": "操作成功",
    }


def error(err:str):
    """Build the standard failure envelope (status=0) with *err* appended."""
    return {
        "status": 0,
        "data": None,
        "message": "操作失败:" + err,
    }


@app.get("/image")
async def image(url=""):
    """Return the CLIP feature vector for the image at *url*.

    Query params:
        url: HTTP(S) address of the image; required.

    Returns the success envelope with the feature list, or an error
    envelope when the url is missing or unreachable.
    """
    if url == "":
        return error("没有url")
    # Probe with HEAD first so missing images are rejected cheaply.
    # A malformed/unreachable URL used to raise and surface as a 500;
    # report it via the normal error envelope instead. The timeout keeps
    # the handler from hanging on a stalled server.
    # NOTE(review): requests is blocking inside an async handler — consider
    # httpx or run_in_executor if throughput matters.
    try:
        res = requests.head(url, timeout=10)
    except requests.RequestException:
        return error("图片不存在:" + url)
    if not res.ok:
        return error("图片不存在:" + url)
    print("正在处理图片:" + url + "\n")
    data = getImageFeature(url)
    return success(data)


@app.get("/text")
async def text(text=""):
    """Return CLIP text features for each space-separated term in *text*."""
    if not text:
        return error("没有text")
    terms = text.split(" ")
    return success(getTextFeature(terms))


def getImageFeature(url:str):
    """Download the image at *url* and return its CLIP embedding.

    Returns a nested list (1 x feature_dim) of floats.
    """
    # Timeout keeps a stalled remote server from hanging the worker forever.
    response = requests.get(url, timeout=30)
    # BytesIO can be constructed from the payload directly — no manual
    # write/seek dance needed (PIL seeks to 0 itself).
    image = preprocess(Image.open(BytesIO(response.content))).unsqueeze(0).to(device)
    with torch.no_grad():
        image_features = model.encode_image(image)
        # L2-normalise so downstream cosine similarity is a plain dot product.
        image_features /= image_features.norm(dim=-1, keepdim=True)
    # NOTE(review): softmax over an embedding is unusual — it destroys the
    # unit-norm property just established above; confirm clients actually
    # expect softmaxed values. Behavior kept as-is. (.detach() dropped:
    # redundant inside no_grad.)
    return image_features.softmax(dim=-1).cpu().numpy().tolist()

def getTextFeature(words:list):
    """Tokenize *words* and return their CLIP text embeddings.

    Returns a nested list (len(words) x feature_dim) of floats.
    """
    tokens = clip.tokenize(words).to(device)
    # no_grad matches getImageFeature and avoids building an autograd graph
    # on every request — the original accumulated graph state (and held
    # grad-enabled tensors) for no reason in a serving path.
    with torch.no_grad():
        text_features = model.encode_text(tokens)
        # L2-normalise so cosine similarity is a plain dot product.
        text_features /= text_features.norm(dim=-1, keepdim=True)
    # NOTE(review): softmax over an embedding is unusual — kept for parity
    # with getImageFeature and existing clients.
    return text_features.softmax(dim=-1).cpu().numpy().tolist()



if __name__ == "__main__":
    # Dev entry point: serve on all interfaces, port 8000. Use a proper
    # ASGI deployment (e.g. uvicorn CLI with workers) for production.
    uvicorn.run(app, host="0.0.0.0", port=8000)