# Requires vllm>=0.8.5
import torch
import vllm
from vllm import LLM
import numpy as np
import os



class Qwen3Embedder:
    """Thin wrapper around a vLLM ``LLM`` configured for the embedding task.

    Intended for Qwen3-Embedding models served through vLLM (>=0.8.5, per
    the file header).
    """

    def __init__(self, model_name: str = "Qwen/Qwen3-Embedding-0.6B", task: str = "embed",
                 disable_log_stats: bool = True, tensor_parallel_size: int = 1):
        """Load the embedding model.

        Args:
            model_name: HuggingFace model id or local path.
            task: vLLM task name; must stay "embed" for embedding output.
            disable_log_stats: Suppress vLLM's periodic stats logging.
            tensor_parallel_size: Number of GPUs to shard the model across.
        """
        self.model = LLM(model=model_name, task=task, disable_log_stats=disable_log_stats,
                         tensor_parallel_size=tensor_parallel_size)

    def embed(self, input_texts: list[str]) -> torch.Tensor:
        """Embed a batch of texts.

        Args:
            input_texts: Texts to embed (order is preserved).

        Returns:
            A float tensor of shape (len(input_texts), embedding_dim).
        """
        outputs = self.model.embed(input_texts)
        embeddings = [o.outputs.embedding for o in outputs]
        return torch.tensor(embeddings)

    def destroy(self) -> None:
        """Release the model and tear down torch.distributed state.

        Idempotent: calling this twice is a no-op on the second call instead
        of raising AttributeError on the already-deleted ``model`` attribute.
        """
        if torch.distributed.is_initialized():
            torch.distributed.destroy_process_group()
        # Guard so repeated destroy() calls do not raise AttributeError.
        if hasattr(self, "model"):
            del self.model


def get_detailed_instruct(task_description: str, query: str) -> str:
    """Build an instruction-prefixed query string for the embedding model.

    Produces the exact prompt layout the model expects:
    ``Instruct: <task>\nQuery:<query>`` (note: no space after ``Query:``).
    """
    return "Instruct: " + task_description + "\nQuery:" + query


if __name__ == "__main__":
    # 示例任务和数据
    task = 'Obtaining medical data representation'
    data = {
        "gender": "female", "age": 34, "height": 168.2, "weight": 62.7,
        "blood_test": {"WBC": 6.8, "RBC": 4.3, "HGB": 132}
    }
    # 多个输入句子
    input_texts = [
        get_detailed_instruct(task, str(data)),
        get_detailed_instruct(task, "This is another example."),
        "Just a plain sentence without instruction."
    ]

    embedder = Qwen3Embedder()
    embeddings = embedder.embed(input_texts)
    print(embeddings)
    print(np.linalg.norm(embeddings.numpy(), axis=1))  # 输出每个embedding的模长

    embedder.destroy()
