"""Ask a question to a HuggingFace Hub model (Mistral-7B) via a LangChain LLMChain."""

from langchain import LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFaceHub

# HuggingFace Hub repository ID of the model to query.
repo_id = 'mistralai/Mistral-7B-v0.1'

# The question to ask the model.
question = "Who is Son Heung Min?"

# Prompt template; {question} is substituted at run time.
template = """Question: {question}
Answer: """

# Build the prompt template object from the raw template string.
prompt = PromptTemplate(template=template, input_variables=["question"])

# Create the HuggingFaceHub LLM client.
# temperature=0.2 keeps the output mostly deterministic;
# max_length=128 caps the length of the generated text.
llm = HuggingFaceHub(
    repo_id=repo_id,
    model_kwargs={"temperature": 0.2,
                  "max_length": 128}
)

# Chain the prompt template and the LLM together.
llm_chain = LLMChain(prompt=prompt, llm=llm)

# Execute the chain (calls the HuggingFace Hub inference API) and print the answer.
print(llm_chain.run(question=question))