from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch

class BaseLLM:
    """Load a causal-LM from the Hugging Face Hub and expose it as a LangChain LLM.

    The model ID is configurable (defaults to Qwen/Qwen2.5-1.5B-Instruct) so the
    same wrapper can serve other instruct models without code changes.
    """

    DEFAULT_MODEL_ID = "Qwen/Qwen2.5-1.5B-Instruct"

    def __init__(self, model_name: str = DEFAULT_MODEL_ID):
        """Download (or load from cache) the model weights and tokenizer.

        Args:
            model_name: Hugging Face Hub model ID to load.
        """
        # Load the model with automatic device placement across available GPUs/CPU.
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True,  # Qwen models ship custom code that must be trusted
        )
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True,
        )

    def create_pipe(self, max_new_tokens: int = 512):
        """Build a transformers text-generation pipeline over the loaded model.

        Args:
            max_new_tokens: generation length cap (default 512, as before).

        Returns:
            A transformers ``Pipeline`` for the "text-generation" task.
        """
        return pipeline(
            "text-generation",
            model=self.model,
            tokenizer=self.tokenizer,
            max_new_tokens=max_new_tokens,
        )

    def create_langchain_llm(self):
        """Wrap the generation pipeline as a LangChain-compatible LLM.

        Returns:
            A ``HuggingFacePipeline`` usable via ``llm.invoke(...)``.
        """
        return HuggingFacePipeline(pipeline=self.create_pipe())

if __name__ == "__main__":
    # Guarded so importing this module doesn't trigger a model download/inference.
    llm = BaseLLM().create_langchain_llm()
    print(llm.invoke("你好，翻译我爱你为英文"))