# model_service.py
from fastapi import FastAPI
from transformers import AutoTokenizer, AutoModelForCausalLM
from sentence_transformers import SentenceTransformer
import torch
import uvicorn

app = FastAPI()

# Path to the local causal-LM checkpoint (Qwen3-1.7B).
llm_model_name = "/workspace/models/Qwen3-1.7B"

# Path to the local sentence-embedding checkpoint (gte-Qwen2-1.5B-instruct).
embedding_model_name = "/workspace/models/gte-Qwen2-1.5B-instruct/iic/gte_Qwen2-1___5B-instruct"


# Load the LLM tokenizer and weights once at import time.
# device_map="auto" lets transformers place weights on available devices;
# torch_dtype="auto" keeps the checkpoint's native precision.
tokenizer = AutoTokenizer.from_pretrained(llm_model_name)
model = AutoModelForCausalLM.from_pretrained(
    llm_model_name,
    torch_dtype="auto",
    device_map="auto"
)


# Load the embedding model once at import time (device chosen by the library).
embedding_model = SentenceTransformer(embedding_model_name)

# LLM text-generation endpoint.
@app.post("/generate/")
async def generate(prompt: dict):
    """Generate a chat completion for ``prompt['text']``.

    Expects a JSON body like ``{"text": "..."}``. Returns
    ``{"response": <generated text>}`` on success or
    ``{"error": <message>}`` on any failure (missing key, model error, ...).
    """
    try:
        messages = [
            {"role": "user", "content": prompt['text']}
        ]
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
            enable_thinking=True  # Switches between thinking and non-thinking modes. Default is True.
        )

        model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
        generated_ids = model.generate(
            **model_inputs,
            max_new_tokens=32768
        )
        # Keep only the newly generated tokens (strip the echoed prompt prefix).
        output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()

        # Locate the end of the model's "thinking" section by finding the
        # LAST occurrence of token id 151668 (</think>); everything after it
        # is the final answer.
        try:
            index = len(output_ids) - output_ids[::-1].index(151668)
        except ValueError:
            # No </think> token present: treat the whole output as the answer.
            index = 0

        # NOTE(review): the thinking segment (output_ids[:index]) was decoded
        # and discarded in the original; skip that wasted decode and only
        # produce the answer text returned to the caller.
        content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")

        return {"response": content}

    except Exception as e:
        # Service boundary: report any failure to the client as an error payload.
        return {"error": str(e)}


# Embedding endpoint.
@app.post("/embedding/")
async def embedding(prompt: dict):
    """Embed ``prompt['text']`` with the sentence-transformer model.

    Renamed from ``generate`` — the original redefined the /generate/
    handler's function name (flake8 F811); the HTTP route is unchanged.

    Expects a JSON body like ``{"text": "..."}``. Returns
    ``{"embedding": [[...]]}`` (a 1xD float32 vector as nested lists)
    on success or ``{"error": <message>}`` on failure.
    """
    try:
        query = prompt['text']
        # encode() on a single-item list yields a (1, dim) array; cast to
        # float32 for a compact, consistent payload.
        query_embedding = embedding_model.encode([query]).astype('float32')

        return {"embedding": query_embedding.tolist()}

    except Exception as e:
        # Service boundary: report any failure to the client as an error payload.
        return {"error": str(e)}

if __name__ == "__main__":
    # Serve on all interfaces, port 8000 (0.0.0.0 exposes the service beyond localhost).
    uvicorn.run(app, host="0.0.0.0", port=8000)