import os

import streamlit as st
from PIL import Image
from langchain.prompts import PromptTemplate
from langchain_groq import ChatGroq  # Groq chat model used for predictions
# Set the title and subtitle
st.title('Bike Purchase Predictions')
st.subheader("Get details and purchase suggestions for your bike.")
# Initialize the Groq chat model; read the API key from the environment rather than hardcoding it
LLM = ChatGroq(
    temperature=0.6,
    groq_api_key=os.environ.get("GROQ_API_KEY"),  # Set GROQ_API_KEY before running the app
)
# Upload image of bike
uploaded_image = st.file_uploader("Upload an image of the Bike", type=["jpg", "jpeg", "png"])
# Input fields for bike details
kilometres = st.number_input("Kilometres driven", min_value=0)
owner = st.text_input("Owner")
purchase_year = st.number_input("Purchased Year", min_value=1900, max_value=2025)
brand_model = st.text_input("Brand and Model")
# Build a prompt from the uploaded bike image and details, then ask the Groq model for a prediction
def process_file_and_search(uploaded_file, kilometres, owner, purchase_year, brand_model):
    if uploaded_file is not None:
        # Display the uploaded bike image
        bike_image = Image.open(uploaded_file)
        st.image(bike_image, caption="Uploaded Bike Image", use_column_width=True)

        # Define the prompt template for the model
        prompt_template = PromptTemplate(
            input_variables=["kilometres", "owner", "purchase_year", "brand_model"],
            template=(
                "Based on the uploaded bike image and the following details:\n"
                "1. Kilometres driven: {kilometres}\n"
                "2. Owner: {owner}\n"
                "3. Purchased Year: {purchase_year}\n"
                "4. Brand and Model: {brand_model}\n"
                "This app is used in India. Predict the details of the bike. "
                "Should the bike be purchased? What is its expected price in India? "
                "Provide the reasons for your suggestions."
            ),
        )

        # Fill in the template with the user-supplied details
        formatted_prompt = prompt_template.format(
            kilometres=kilometres,
            owner=owner,
            purchase_year=purchase_year,
            brand_model=brand_model,
        )

        try:
            # Invoke the Groq chat model with the formatted prompt
            response = LLM.invoke(formatted_prompt)
            # Display the result from the model
            st.write("Prediction Result:")
            st.write(response.content)
        except Exception as e:
            st.error(f"Error: {e}")
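    else:
        # Optional fallback (assumption, not part of the original app): warn the user
        # when no image has been uploaded instead of silently doing nothing.
        st.warning("Please upload an image of the bike before submitting.")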
# Run the prediction when the submit button is clicked
if st.button("Submit"):
    process_file_and_search(uploaded_image, kilometres, owner, purchase_year, brand_model)
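# Usage note (assumption about the deployment setup): run locally with
#   export GROQ_API_KEY=<your key>
#   streamlit run app.py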