Spaces:
Sleeping
Sleeping
File size: 1,342 Bytes
324aade f55656b 324aade ad96115 324aade ad96115 324aade |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 |
import os
import streamlit as st
from dotenv import load_dotenv
from langchain_openai import OpenAI # Ensure this import is correct for your setup
# Load environment variables from the local .env file (API keys, etc.).
load_dotenv()

# Read the OpenAI key for the module-level client.
# NOTE(review): the key is named 'OPEN_API_KEY' here but 'OPENAI_API_KEY'
# two lines below — presumably both should be the standard
# 'OPENAI_API_KEY'; confirm against the .env file.
api_key = os.getenv('OPEN_API_KEY')  # Make sure you have this in your .env file
llm = OpenAI(api_key=api_key)

# Propagate keys into the process environment for libraries that read them
# implicitly. Guard against None: `os.environ[...] = None` raises TypeError
# when the variable is unset, which would crash the whole app at import time.
_openai_key = os.getenv("OPENAI_API_KEY")
if _openai_key is not None:
    os.environ["OPENAI_API_KEY"] = _openai_key

## LangSmith tracking
os.environ['LANGCHAIN_TRACING_V2'] = "true"
_langchain_key = os.getenv("LANGCHAIN_API_KEY")
if _langchain_key is not None:
    os.environ['LANGCHAIN_API_KEY'] = _langchain_key
def get_openai_response(question):
    """Send *question* to an OpenAI completion model and return its reply.

    Parameters
    ----------
    question : str
        The user's free-text question.

    Returns
    -------
    str
        The completion text produced by the model.

    Raises
    ------
    KeyError
        If the 'OPEN_API_KEY' environment variable is not set.
    """
    # NOTE(review): this reads 'OPEN_API_KEY' while the module setup also
    # uses 'OPENAI_API_KEY' — confirm which key the deployment actually sets.
    client = OpenAI(openai_api_key=os.environ["OPEN_API_KEY"], temperature=0.6)
    # Calling the LLM object directly (llm(question)) is deprecated in
    # LangChain; invoke() is the supported equivalent for a single prompt.
    return client.invoke(question)
# --- Streamlit page layout -------------------------------------------------
st.set_page_config(page_title="Q&A Demo")
st.header("Chat Open For All")

# Collect the question and the submit action from the user.
question_text = st.text_input("Input:", key="input")
ask_clicked = st.button("Ask the question")

# Only query the model when the button was pressed AND a question was typed.
if ask_clicked and question_text:
    answer = get_openai_response(question_text)
    st.subheader("The Response is:")
    st.write(answer)
|