# @FileName  : webApi.py
# @Time      : 2025/3/19 19:09
# @Author    : LuZhaoHui
# @Software  : PyCharm

from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import StreamingResponse, JSONResponse
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from typing import List, Optional
import torch
from io import BytesIO
from PIL import Image

# Initialize the model and processor
# NOTE(review): this runs at import time — the process blocks here until the
# (multi-GB) checkpoint is loaded.
model_dir = "./qwen2.5_vl_7B"
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_dir,
    torch_dtype=torch.bfloat16,  # half-precision weights to cut memory use
    device_map="auto",           # let transformers place layers across available devices
)
# max_pixels caps how large an image the vision tower will ingest
# (1280 * 28 * 28 follows the Qwen2.5-VL patch-size convention).
processor = AutoProcessor.from_pretrained(model_dir, max_pixels=1280 * 28 * 28)

app = FastAPI()

# Store conversation history with a maximum limit.
# NOTE(review): this is a single process-wide list shared by ALL clients —
# concurrent users will interleave their conversations. Fine for a demo;
# a per-session store would be needed for real multi-user use.
conversation_history = []
MAX_CONVERSATION_HISTORY = 20

def process_vision_info(messages):
    """
    Collect the vision payloads referenced by a chat-message list.

    Args:
        messages: list of message dicts, each carrying a "content" list of
            parts; every part is a dict with at least a "type" key.

    Returns:
        Tuple ``(image_inputs, video_inputs)``. ``image_inputs`` holds the
        already-loaded PIL.Image objects stored under each image part's
        "image" key, in message order. ``video_inputs`` is always empty —
        no video parts are ever stored in the history.
    """
    image_inputs = [
        part["image"]
        for msg in messages
        for part in msg["content"]
        if part["type"] == "image"
    ]
    return image_inputs, []

def _decode_new_tokens(inputs, generated_ids):
    """Strip the prompt tokens from each sequence and decode only the newly
    generated text (shared by the stream and non-stream paths)."""
    generated_ids_trimmed = [
        out_ids[len(in_ids):]
        for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    return processor.batch_decode(
        generated_ids_trimmed,
        skip_special_tokens=True,
        clean_up_tokenization_spaces=False,
    )


def _trim_history():
    """Drop the oldest messages until the history fits the configured cap.

    Uses a loop (not a single pop) so the invariant holds even if the list
    somehow grew by more than one entry.
    """
    while len(conversation_history) > MAX_CONVERSATION_HISTORY:
        conversation_history.pop(0)


def _record_assistant_reply(reply_text):
    """Append the model's reply to the shared history and re-trim."""
    conversation_history.append({
        "role": "assistant",
        "content": [{"type": "text", "text": reply_text}],
    })
    _trim_history()


@app.post("/generate-text")
async def generate_text(
    text: str = Form(...),
    image: UploadFile = File(...),
):
    """
    Accept a text prompt plus one image, run Qwen2.5-VL over the running
    conversation, and return the generated text.

    Form fields:
        text: the user's prompt.
        image: an uploaded image file accompanying the prompt.

    Returns:
        JSONResponse ``{"output_text": [...]}`` on success, a 400
        JSONResponse for an undecodable image, or a plain-text
        StreamingResponse when the (currently hard-coded) ``stream`` flag
        is enabled.
    """
    # Set stream variable directly in the code
    stream = False

    # Load the image into memory as a PIL object.
    image_bytes = await image.read()
    try:
        pil_image = Image.open(BytesIO(image_bytes))
        # PIL decodes lazily: convert() forces a full decode NOW so a corrupt
        # upload fails here with a clear 400 instead of deep inside the
        # processor, and normalizes palette/RGBA uploads to plain RGB.
        pil_image = pil_image.convert("RGB")
    except (OSError, ValueError):
        # UnidentifiedImageError is an OSError subclass; truncated files
        # raise OSError from convert().
        return JSONResponse(
            status_code=400,
            content={"error": "Uploaded file is not a valid image."},
        )

    # Append user input to conversation history, then enforce the cap.
    conversation_history.append({
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": pil_image,  # Store the PIL.Image object directly
            },
            {"type": "text", "text": text},
        ],
    })
    _trim_history()

    # Prepare the messages for the model.
    input_text = processor.apply_chat_template(
        conversation_history, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(conversation_history)
    inputs = processor(
        text=[input_text],
        images=image_inputs,
        videos=None,
        padding=True,
        return_tensors="pt",
    ).to(model.device)

    # NOTE(review): model.generate() is a blocking CPU/GPU call inside an
    # async handler — it stalls the event loop for the duration. Consider
    # fastapi.concurrency.run_in_threadpool if concurrency matters.
    if stream:
        def generate_stream():
            generated_ids = model.generate(**inputs, max_new_tokens=128)
            output_text = _decode_new_tokens(inputs, generated_ids)
            # Record the reply so the stream path keeps history consistent
            # with the non-stream path (previously it silently dropped it).
            _record_assistant_reply(output_text[0])
            for chunk in output_text:
                yield chunk + "\n"

        return StreamingResponse(generate_stream(), media_type="text/plain")

    generated_ids = model.generate(**inputs, max_new_tokens=128)
    output_text = _decode_new_tokens(inputs, generated_ids)
    _record_assistant_reply(output_text[0])
    return JSONResponse(content={"output_text": output_text})

@app.post("/clear-history")
async def clear_history():
    """
    Endpoint to clear the conversation history.
    """
    # Empty the shared list in place — every existing reference to the
    # module-level list stays valid, so no `global` rebinding is needed.
    conversation_history.clear()
    return JSONResponse(content={"message": "Conversation history cleared."})

if __name__ == "__main__":
    import uvicorn

    # Bind on all interfaces so the API is reachable from other hosts.
    # NOTE(review): no TLS/auth here — presumably meant to sit behind a
    # trusted network or reverse proxy; confirm before exposing publicly.
    uvicorn.run(app, host="0.0.0.0", port=8000)
