import os

import streamlit as st
from dotenv import load_dotenv
from langchain_openai import OpenAI  # Ensure this import matches your installed langchain-openai version

# Load environment variables from the .env file
load_dotenv()

# Read the OpenAI API key (make sure OPENAI_API_KEY is set in your .env file)
api_key = os.getenv("OPENAI_API_KEY")

# LangSmith tracking
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")

# Initialize the LLM once; temperature controls how varied the responses are
llm = OpenAI(openai_api_key=api_key, temperature=0.6)


def get_openai_response(question):
    # Send the question to the model and return its text completion
    return llm.invoke(question)


st.set_page_config(page_title="Q&A Demo")
st.header("Chat Open For All")

user_input = st.text_input("Input:", key="input")  # Named 'user_input' to avoid shadowing the built-in 'input'
submit = st.button("Ask the question")

# Only query the model when the button is pressed and the input is not empty
if submit and user_input:
    response = get_openai_response(user_input)
    st.subheader("The Response is:")
    st.write(response)
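
# A minimal way to run this app, assuming the script above is saved as app.py and a
# .env file in the same directory defines OPENAI_API_KEY and LANGCHAIN_API_KEY:
#
#   pip install streamlit python-dotenv langchain-openai
#   streamlit run app.py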