from flask import Flask, request, jsonify
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration
import torch
from PIL import Image
# import io
import numpy as np
import json

app = Flask(__name__)

# Initialize the processor and model at import time (runs once at startup).
print("init processor...")
processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")

print("load pretrained model...")
# float16 weights + low_cpu_mem_usage reduce host-RAM pressure during loading.
model = LlavaNextForConditionalGeneration.from_pretrained(
    "llava-hf/llava-v1.6-mistral-7b-hf",
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True
)

print("model move to cuda device...")
# NOTE(review): GPU 0 is hard-coded; this raises at startup if CUDA is unavailable.
model.to("cuda:0")

@app.route('/predict', methods=['POST'])
def predict():
    """Run LLaVA-NeXT generation on a chat conversation plus optional images.

    Expects a multipart/form-data POST with:
      * form field ``json``   -- JSON-encoded conversation in the format
        accepted by ``processor.apply_chat_template``
      * file field ``images`` -- zero or more image uploads (optional)

    Returns JSON ``{"result": <decoded model output>}`` on success, or a
    ``{"error": ...}`` payload with HTTP 400 for malformed input.
    """
    # The conversation payload is mandatory.
    if 'json' not in request.form:
        return jsonify({"error": "JSON data is required"}), 400

    # Parse the conversation; reject malformed JSON with an explicit 400
    # instead of letting json.loads raise and surface as an opaque 500.
    try:
        conversation = json.loads(request.form['json'])
    except json.JSONDecodeError as exc:
        return jsonify({"error": f"invalid JSON: {exc}"}), 400

    has_images = 'images' in request.files

    # Read and preprocess every uploaded image.
    processed_images = []
    if has_images:
        for image_file in request.files.getlist('images'):
            try:
                image = Image.open(image_file)
            except OSError:
                # Covers PIL.UnidentifiedImageError (an OSError subclass):
                # a corrupt upload is a client error, not a server crash.
                return jsonify({"error": "could not decode an uploaded image"}), 400
            # Force 3 channels (drops alpha, expands grayscale).
            image = image.convert("RGB")
            # NOTE(review): converting to float16 loses precision versus
            # passing the PIL image (or uint8 array) straight to the
            # processor — kept as-is to preserve existing behavior; confirm
            # whether the processor's rescaling expects this.
            processed_images.append(np.array(image).astype(np.float16))

    # Render the conversation into a text prompt via the chat template.
    prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

    # Tokenize text (and images, when present) and move tensors to the GPU.
    if has_images:
        inputs = processor(
            text=prompt, images=processed_images, padding=True, return_tensors="pt"
        ).to("cuda:0")
    else:
        inputs = processor(text=prompt, return_tensors="pt").to("cuda:0")

    # inference_mode avoids building autograd state, cutting per-request
    # GPU memory use during generation.
    with torch.inference_mode():
        output = model.generate(**inputs, max_new_tokens=1000)
    result_text = processor.decode(output[0], skip_special_tokens=True)

    return jsonify({"result": result_text})

if __name__ == '__main__':
    # Bind to all interfaces so the service is reachable from other hosts.
    app.run(host='0.0.0.0', port=5000)
