# Q&A Chatbot
from langchain_community.llms.openai import OpenAI
from dotenv import load_dotenv
import os
import streamlit as st

# Load environment variables from .env (expects OPENAI_API_KEY).
load_dotenv()


def get_openai_response(question, model_name="gpt-3.5-turbo-instruct", temperature=0.5):
    """Send *question* to an OpenAI completion model and return the text response.

    Parameters
    ----------
    question : str
        The user's question, forwarded verbatim to the model.
    model_name : str, optional
        OpenAI completion model to use. Defaults to the original
        hard-coded "gpt-3.5-turbo-instruct".
    temperature : float, optional
        Sampling temperature; default 0.5 matches the original behavior.

    Returns
    -------
    str
        The model's completion, as returned by ``llm.invoke``.

    Raises
    ------
    RuntimeError
        If OPENAI_API_KEY is not set, so the failure is explicit instead
        of a confusing downstream authentication error from the API client.
    """
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY is not set; add it to your .env file.")
    # Creating the client per call is fine here: the network request dominates,
    # and Streamlit re-runs this script on every interaction anyway.
    llm = OpenAI(
        openai_api_key=api_key,
        model_name=model_name,
        temperature=temperature,
    )
    # invoke() is the current Runnable-style entry point for LangChain LLMs.
    return llm.invoke(question)


# --- Streamlit UI (module-level script, as Streamlit expects) ---
st.set_page_config(page_title='Q&A Demo')
st.header("Langchain Application")

# Text input for the user's question; `key` pins it in session state.
input_question = st.text_input("Input: ", key="input")

# Button that triggers the model call.
submit = st.button("Ask the question")

# Only call the model when the button was clicked AND a question was entered.
if submit and input_question:
    response = get_openai_response(input_question)
    st.subheader("The Response is")
    st.write(response)