from dotenv import load_dotenv
from langchain import HuggingFaceHub, LLMChain
from langchain.prompts import PromptTemplate

# Load environment variables from .env (typically the Hugging Face Hub API token).
load_dotenv()

# First example: translate a natural-language question into SQL.
# hub_llm = HuggingFaceHub(repo_id="mrm8488/t5-base-finetuned-wikiSQL")
# prompt = PromptTemplate(
#     input_variables=["question"],
#     template="Translate English to SQL: {question}"
# )
# hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)
# print(hub_chain.run("What is the average age of the respondents using a mobile device?"))

# Second example: ask the model how it feels about starting a given profession.
hub_llm = HuggingFaceHub(
    repo_id='kaist-ai/selfee-7b-delta',
    model_kwargs={'temperature': 0.5, 'max_length': 100}
)

prompt = PromptTemplate(
    input_variables=["profession"],
    template="You are about to start your career as {profession}, how do you feel?"
)

hub_chain = LLMChain(prompt=prompt, llm=hub_llm, verbose=True)

# Run the chain for several professions and print each completion.
print(hub_chain.run("customer service agent"))
print(hub_chain.run("politician"))
print(hub_chain.run("Fintech CEO"))
print(hub_chain.run("insurance agent"))