# Requires vllm>=0.8.5
import torch
from vllm import LLM
import numpy as np
import torch.distributed as dist




class EmbeddingBuilder:
    """Thin wrapper around a vLLM embedding engine with explicit cleanup.

    Usable as a context manager so the engine and any torch.distributed
    process group spawned by vLLM are torn down deterministically.
    """

    def __init__(self, model_name="Qwen/Qwen3-Embedding-8B", task="embed", disable_log_stats=True,
                 tensor_parallel_size=2, gpu_memory_utilization=0.9):
        # Requires vllm>=0.8.5 (see file header). Constructing LLM may spawn
        # worker processes and initialize torch.distributed.
        self.model = LLM(model=model_name, task=task, disable_log_stats=disable_log_stats,
                         tensor_parallel_size=tensor_parallel_size, gpu_memory_utilization=gpu_memory_utilization)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Implicit None return: exceptions from the with-block are never suppressed.

    def __del__(self):
        # __del__ may run at interpreter shutdown with modules partially torn
        # down; never let an exception escape the finalizer.
        try:
            self.close()
        except Exception:
            pass

    def embed(self, input_texts):
        """Embed a batch of texts.

        input_texts: iterable of strings accepted by ``LLM.embed``.
        Returns a 2-D ``torch.Tensor`` with one embedding row per input.
        """
        outputs = self.model.embed(input_texts)
        embeddings = [o.outputs.embedding for o in outputs]
        return torch.tensor(embeddings)

    def close(self):
        """Shut down the engine and destroy the process group. Idempotent.

        Safe to call even when __init__ failed before ``self.model`` was
        assigned (the previous version raised AttributeError from __del__
        in that case).
        """
        model = getattr(self, "model", None)
        if model is not None:
            # vLLM has moved the shutdown hook between versions; probe both.
            if hasattr(model, 'shutdown'):
                model.shutdown()
            elif hasattr(model, 'llm_engine') and hasattr(model.llm_engine, 'shutdown'):
                model.llm_engine.shutdown()
            self.model = None

        # is_available() guard: is_initialized() is not usable on torch
        # builds compiled without distributed support.
        if dist.is_available() and dist.is_initialized():
            dist.destroy_process_group()


def get_detailed_instruct(task_description: str, query: str) -> str:
    """Build the instruction-prefixed query string expected by the embedding model."""
    # NOTE: the template deliberately has no space after "Query:" — this
    # matches the prompt format the model was trained with.
    return 'Instruct: ' + task_description + '\nQuery:' + query