File size: 1,080 Bytes
9a46d15 372b3b1 38bebd7 9d116f2 9c59245 9a46d15 ccf8138 9a46d15 ccf8138 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 |
# The class is exported as `OllamaLLM` in the `langchain_ollama` package;
# alias it so the rest of this script can keep using the short `Ollama` name.
from langchain_ollama.llms import OllamaLLM as Ollama

# `LLMChain` must be imported from `langchain.chains` — the top-level
# `from langchain import LLMChain` path was removed in langchain >= 0.1.
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

import streamlit as st
# Page heading for the Streamlit app.
st.title("LLaMA 3.1 8B Instruct Model with Streamlit (Using LangChain & Ollama)")


@st.cache_resource
def load_ollama_model():
    """Build and cache the Ollama-backed LLaMA 3.1 model handle.

    `st.cache_resource` makes Streamlit create the model object once per
    server process instead of on every script rerun.
    """
    model = Ollama(model="llama3.1")  # swap the tag here for other versions
    return model


llama_model = load_ollama_model()
# The chain is a pass-through: the template is an identity mapping of the
# single "prompt" variable, so the user's text reaches the model verbatim.
prompt_template = PromptTemplate(input_variables=["prompt"], template="{prompt}")

llm_chain = LLMChain(prompt=prompt_template, llm=llama_model)
# Prompt input from the user.
user_input = st.text_area("Enter your prompt:", "")

# Generate a response when the button is pressed.
if st.button("Generate"):
    # `.strip()` so a whitespace-only prompt is treated as empty instead of
    # being sent to the model (the original truthiness check let it through).
    if user_input.strip():
        # LLMChain.run returns the model's text completion for the prompt.
        response = llm_chain.run({"prompt": user_input})
        st.text_area("Model Response:", response, height=200)
    else:
        st.warning("Please enter a prompt.")
|