# -*- coding: utf-8 -*-
# time: 2025/4/9 10:31
# file: ch01.py
# author: hanson
"""
https://blog.csdn.net/java_leaf/article/details/147855021
流式处理
"""
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load the tokenizer and model weights from the Hugging Face hub
# (first run downloads the checkpoint; later runs use the local cache).
model_name = "Qwen/Qwen2.5-0.5B"  # swap in any causal-LM checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Build the text-generation pipeline.
# FIX: the original used max_length=200, which counts the PROMPT tokens
# toward the limit — a long prompt leaves little or no room for the
# answer, and transformers warns that max_length is deprecated here.
# max_new_tokens bounds only the generated continuation.
hf_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=200,
)

# Wrap the pipeline in a LangChain-compatible LLM object.
llm = HuggingFacePipeline(pipeline=hf_pipeline)

# Prompt template: fixed system instruction + the user's question.
# NOTE(review): Qwen2.5-0.5B is a base (non-chat) checkpoint, so the
# chat messages are flattened to plain text; a chat-tuned checkpoint
# would follow the system instruction more reliably — confirm intent.
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是世界级的技术专家，用简洁易懂的语言回答技术问题。"),
    ("user", "{input}")
])

# Chain: render prompt -> run the model -> parse the output to str.
chain = prompt | llm | StrOutputParser()

# Run one blocking inference and print the generated text.
response = chain.invoke({"input": "解释一下量子计算"})
print(response)

