import os
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
from config import OPERATE_MODEL_CONFIG
from vllm.lora.request import LoRARequest


def load_vllm_model():
    """Initialize the vLLM engine with LoRA support and its tokenizer.

    Returns:
        A 4-tuple of (llm engine, classify LoRARequest, NER LoRARequest,
        tokenizer), in that order.
    """
    # Pin inference to the GPU index from config (default "0"); a single
    # card is faster when its memory is sufficient.
    os.environ["CUDA_VISIBLE_DEVICES"] = OPERATE_MODEL_CONFIG.get("CUDA_VISIBLE_DEVICES", "0")

    # Location of the base model weights.
    model_path = OPERATE_MODEL_CONFIG['model_dir_path']

    # LoRA adapter handles for the classification and NER tasks.
    classify_adapter = LoRARequest("classify_adapter", 1, OPERATE_MODEL_CONFIG["classifymodel_path"])
    ner_adapter = LoRARequest("ner_adapter", 2, OPERATE_MODEL_CONFIG["ner_model_path"])

    # Build the base engine with LoRA enabled.
    engine = LLM(
        model=model_path,
        enable_lora=True,
        gpu_memory_utilization=0.15,
        # enforce_eager=True
    )
    tok = AutoTokenizer.from_pretrained(model_path)

    return engine, classify_adapter, ner_adapter, tok


def get_sampling_params(temperature=0.9, top_p=0.9, max_tokens=4096):
    """Build the SamplingParams used for generation.

    Generalized from fixed constants: all values are now keyword
    parameters with the previous hard-coded values as defaults, so the
    existing zero-argument call sites behave exactly as before.

    Args:
        temperature: Softmax temperature for sampling (default 0.9).
        top_p: Nucleus-sampling probability cutoff (default 0.9).
        max_tokens: Maximum number of tokens to generate (default 4096).

    Returns:
        A vllm ``SamplingParams`` configured with the given values.
    """
    return SamplingParams(temperature=temperature, top_p=top_p, max_tokens=max_tokens)