# Simple QnA Chatbot
import os

import streamlit as st
from dotenv import load_dotenv
from langchain_google_genai import GoogleGenerativeAI

load_dotenv()  # take environment variables from .env file


def get_openai_response(query_prompt):
    """Send *query_prompt* to the Gemini model and return its text response.

    Kept under its historical name for compatibility; the backing model is
    Google Gemini, not OpenAI.

    Args:
        query_prompt: The user's question as a plain string.

    Returns:
        The model's generated answer as a string.
    """
    # NOTE: the correct keyword is `google_api_key` (the original passed
    # `gemini_api_key`, which the class does not accept).
    llm_gemini = GoogleGenerativeAI(
        google_api_key=os.environ["GOOGLE_API_KEY"],
        model="gemini-pro",
        temperature=0.7,
    )
    # `.invoke()` is the supported call style; calling the LLM object
    # directly (`llm(prompt)`) is deprecated in LangChain.
    return llm_gemini.invoke(query_prompt)


# Init Streamlit
st.set_page_config(page_title="QnA Chatbot", page_icon=":robot:", layout="wide")
st.header("QnA Chatbot using Langchain and OpenAI")

# Input area (`user_question`, not `input`, to avoid shadowing the builtin)
user_question = st.text_area(
    "Ask any question about Langchain and OpenAI.", key="input", height=100
)

submit = st.button("Submit")

# Only hit the LLM when the user actually submits — the original called the
# model on every Streamlit rerun (and then a second, discarded time).
if submit:
    response = get_openai_response(user_question)
    st.subheader("Answer: ")
    st.write(response)