from langchain_community.llms import OpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
import streamlit as st


def get_answers(questions, model):
    """Answer the given quiz questions with the selected model."""
    st.write("Running get_answers for the following questions:", questions)

    answer_prompt = (
        f"I want you to act as a teacher and answer these specific questions: {questions}. "
        f"Give me a straightforward and concise explanation and answer for each one of them."
    )

    if model == "Open AI":
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        # The completion-style LLM returns a plain string.
        answers = llm.invoke(answer_prompt)
    elif model == "Gemini":
        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
        # The chat model returns a message object; extract its text content.
        answers = llm.invoke(answer_prompt).content
    else:
        raise ValueError(f"Unsupported model: {model}")

    return answers
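

# A minimal sketch of a shared dispatch helper that get_answers and
# GetLLMResponse could both delegate to, since they duplicate the same
# model-selection logic. The name _run_llm is hypothetical and not part of
# the original module; it simply normalizes both backends to a plain string.
def _run_llm(prompt, model):
    if model == "Open AI":
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        return llm.invoke(prompt)  # completion LLM already returns a string
    elif model == "Gemini":
        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
        return llm.invoke(prompt).content  # chat model returns a message object
    raise ValueError(f"Unsupported model: {model}")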




def GetLLMResponse(selected_topic_level, selected_topic, num_quizzes, model):
    """Generate quiz questions for the topic, then answer them."""
    question_prompt = (
        f"Generate a {selected_topic_level} math quiz on the topic of {selected_topic}. "
        f"Generate exactly {num_quizzes} questions, no more, and do not provide answers."
    )

    st.write("Running GetLLMResponse with question prompt:", question_prompt)

    if model == "Open AI":
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        questions = llm.invoke(question_prompt)
    elif model == "Gemini":
        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
        questions = llm.invoke(question_prompt).content
    else:
        raise ValueError(f"Unsupported model: {model}")

    st.write("Generated questions:", questions)
    answers = get_answers(questions, model)

    st.write(questions, answers)
    return questions, answers
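

# A minimal sketch of how a Streamlit front end might call GetLLMResponse.
# This usage example is an assumption: the widget labels, options, and
# defaults below are hypothetical and not part of the original module.
if __name__ == "__main__":
    model = st.sidebar.selectbox("Model", ["Open AI", "Gemini"])
    level = st.sidebar.selectbox("Difficulty", ["Easy", "Medium", "Hard"])
    topic = st.sidebar.text_input("Topic", "Algebra")
    count = st.sidebar.number_input("Number of questions", 1, 10, 3)

    if st.sidebar.button("Generate quiz"):
        questions, answers = GetLLMResponse(level, topic, int(count), model)
        st.subheader("Questions")
        st.write(questions)
        st.subheader("Answers")
        st.write(answers)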