from langchain_community.llms import OpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
import streamlit as st


def get_answers(questions, model):
    """Ask the selected LLM backend to answer the given quiz questions.

    Args:
        questions: The question text to answer — a string (Gemini path) or a
            one-element list of strings (Open AI path), as produced by
            ``GetLLMResponse``.
        model: Backend selector — exactly ``"Open AI"`` or ``"Gemini"``.

    Returns:
        The model's answer text: a one-element list for ``"Open AI"``
        (preserving the historical return shape) or a plain string for
        ``"Gemini"``.

    Raises:
        ValueError: If *model* names an unsupported backend.
    """
    # BUG FIX: this was a plain string, so the literal text "{questions}" was
    # sent to the model and the actual questions never reached it. An f-string
    # interpolates them as clearly intended.
    answer_prompt = (
        "I want you to become a teacher answer this specific Question:\n"
        f" {questions}\n\n. You should gave me a straightforward and consise "
        "explanation and answer to each one of them"
    )
    if model == "Open AI":
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        answers = [llm(answer_prompt)]
    elif model == "Gemini":
        llm = ChatGoogleGenerativeAI(
            model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"]
        )
        answers = llm.invoke(answer_prompt).content
    else:
        # BUG FIX: an unrecognized model previously left `answers` unbound and
        # crashed with NameError on the return; fail loudly and clearly instead.
        raise ValueError(f"Unsupported model: {model!r}")
    return answers


def GetLLMResponse(selected_topic_level, selected_topic, num_quizzes, model):
    """Generate a math quiz and its answers with the selected LLM backend.

    Args:
        selected_topic_level: Difficulty level to request (e.g. "beginner").
        selected_topic: Math topic the quiz should cover.
        num_quizzes: Number of questions to generate.
        model: Backend selector — exactly ``"Open AI"`` or ``"Gemini"``.

    Returns:
        A ``(questions, answers)`` tuple. ``questions`` is a one-element list
        for ``"Open AI"`` or a plain string for ``"Gemini"``; ``answers`` is
        whatever :func:`get_answers` returns for the same backend.

    Raises:
        ValueError: If *model* names an unsupported backend.
    """
    # BUG FIX: this was a plain string, so "{selected_topic_level}",
    # "{selected_topic}" and "{num_quizzes}" were sent to the model verbatim
    # instead of the caller's values. An f-string interpolates them.
    question_prompt = (
        "I want you to just generate question with this specification: "
        f"Generate a {selected_topic_level} math quiz on the topic of "
        f"{selected_topic}. Generate only {num_quizzes} questions not more "
        "and without providing answers."
    )
    if model == "Open AI":
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        questions = [llm(question_prompt)]
    elif model == "Gemini":
        llm = ChatGoogleGenerativeAI(
            model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"]
        )
        questions = llm.invoke(question_prompt).content
    else:
        # BUG FIX: an unrecognized model previously left `questions` unbound
        # and crashed with NameError below; fail loudly and clearly instead.
        raise ValueError(f"Unsupported model: {model!r}")
    answers = get_answers(questions, model)
    return questions, answers