import os

from vllm import LLM, SamplingParams
# FIX: LLMChain lives in langchain.chains, not langchain_community.llms
# (langchain_community.llms only holds LLM wrapper classes); the original
# import raised ImportError at startup.
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Enable ModelScope support so vLLM can resolve model identifiers via
# ModelScope. Must be set before the LLM engine is constructed below.
os.environ['VLLM_USE_MODELSCOPE'] = 'True'

# Example prompts for batch generation demos.
# NOTE(review): this list is never used later in the script — confirm it is
# intentional scaffolding, or remove it.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]

# Decoding configuration: moderately creative sampling.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# Load the local Qwen2.5-7B-Instruct checkpoint into the vLLM engine.
# trust_remote_code=True is required for Qwen's custom model code.
llm = LLM(model="/data/ai/LLaMA-Factory/model/Qwen2.5-7B-Instruct/qwen/Qwen2___5-7B-Instruct/", trust_remote_code=True)

# Wrap vLLM text generation for use from LangChain.
# NOTE(review): subclassing LLMChain is unusual — LangChain custom models
# normally subclass the base LLM class, and a chain's `_call` receives a dict
# of inputs rather than a prompt string. Verify this override is actually
# invoked by the installed LangChain version.
class VLLMChain(LLMChain):
    def _call(self, prompt: str, **kwargs) -> str:
        """Generate a completion for ``prompt`` with the module-level vLLM engine.

        Returns the text of the first candidate of the first request.
        """
        # llm and sampling_params are module-level globals defined above.
        output = llm.generate([prompt], sampling_params)
        # BUG FIX: the attribute on vLLM's CompletionOutput is `.text`;
        # the original `.texta` raised AttributeError on every call.
        generated_text = output[0].outputs[0].text
        return generated_text

# Create the LangChain chain instance.
# NOTE(review): LLMChain normally requires a `prompt` field and expects a
# LangChain BaseLanguageModel for `llm`; passing the raw vLLM engine here may
# fail pydantic validation — confirm against the installed LangChain version.
vllm_chain = VLLMChain(llm=llm)

# Define a template and fill it manually before invoking the chain.
prompt_template = "What do you think about the following statement: {text}"
prompt = PromptTemplate(input_variables=["text"], template=prompt_template)

# Build the concrete prompt string from the template.
input_text = "The future of AI is bright"
formatted_prompt = prompt.format(text=input_text)

# Run the chain on the formatted prompt and print the completion.
# NOTE(review): Chain.run is deprecated in newer LangChain releases in favor
# of .invoke — verify which API the installed version expects.
result = vllm_chain.run(formatted_prompt)
print(f"Generated text: {result}")
