import os

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline
from langchain.memory import ConversationBufferMemory
from langchain.utilities import WikipediaAPIWrapper

# Replace with your own token if you also call Hugging Face Hub endpoints;
# never commit a real token to source control.
os.environ['HUGGINGFACEHUB_API_TOKEN'] = 'YOUR_HUGGINGFACEHUB_API_TOKEN'

st.title("🦜🔗 LangChain Local test")
question = st.text_input('Plug in your prompt here')

# Prompt template for the local LLM
template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

# Conversation memory for the answer chain
answer_memory = ConversationBufferMemory(input_key='question', memory_key='chat_history')

# Load the local model and wrap it in a text-generation pipeline so LangChain can use it
tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-base-alpha-3b")
model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-base-alpha-3b")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256)
llm = HuggingFacePipeline(pipeline=pipe)

llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True, output_key='answer', memory=answer_memory)

wiki = WikipediaAPIWrapper()

# Show results on screen once a question has been entered
if question:
    answer = llm_chain.run(question)
    wiki_research = wiki.run(question)

    st.write(answer)

    with st.expander('Answer History'):
        st.info(answer_memory.buffer)

    with st.expander('Wikipedia Research'):
        st.info(wiki_research)